/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
#include "bitmap.h"

/*
 * metadata/data are stored on disk in 4k units (blocks) regardless of the
 * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)
#define BLOCK_SECTOR_SHIFT (3)

/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write through mode, the reclaim runs every log->max_free_space,
 * so that recovery does not have to scan too much of the log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
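/*
 * For concreteness: a sector is 512 bytes, so RECLAIM_MAX_FREE_SPACE is
 * 10 * 1024 * 1024 * 2 sectors = 10 GiB, and RECLAIM_MAX_FREE_SPACE_SHIFT
 * implements the "1/4 of disk size" bound as (disk size >> 2).
 */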

/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available to not get too tight.
 */
#define R5L_POOL_SIZE	4

/*
 * r5c journal modes of the array: write-back or write-through.
 * write-through mode behaves identically to the existing log-only
 * implementation.
 */
enum r5c_journal_mode {
	R5C_JOURNAL_MODE_WRITE_THROUGH = 0,
	R5C_JOURNAL_MODE_WRITE_BACK = 1,
};

static char *r5c_journal_mode_str[] = {"write-through",
				       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */
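/*
 * To make the two phases concrete, a write to a write-back array roughly
 * follows the sequence sketched below (a simplified view of the functions
 * named above, not an exhaustive call chain):
 *
 *	write bio -> r5c_try_caching_write()	STRIPE_R5C_CACHING set,
 *						data committed to journal,
 *						bio completed
 *	reclaim   -> r5c_make_stripe_write_out()	STRIPE_R5C_CACHING
 *						cleared, parity computed,
 *						data+parity hit raid disks
 */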

struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, rounded to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim run if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t *io_pool;
	struct bio_set *bs;
	mempool_t *meta_pool;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed.  if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (eg, reclaim
					 * doesn't wait for a specific io_unit
					 * switching to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	/* for r5c_cache */
	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of seq at sh->log_start */
	struct list_head stripe_in_journal_list;

	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write back during degraded mode */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in writeback mode, details below */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};

/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so this chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. These data are tracked in a radix tree
 * (big_stripe_tree). We use the radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up the
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This look up is protected by
 * rcu_read_lock().
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags is set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 */

/*
 * The radix tree requires the lowest 2 bits of the data pointer to be 0.
 * So it is necessary to left shift the counter by 2 bits before using it
 * as the data pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2

/*
 * calculate key for big_stripe_tree
 *
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
				      sector_t sect)
{
	sector_t offset;

	offset = sector_div(sect, conf->chunk_sectors);
	return sect;
}
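/*
 * For example, with 256kB chunks (conf->chunk_sectors == 512) a request at
 * sector 1030 maps to key 1030 / 512 = 2 (sector_div() leaves the quotient
 * in sect). A big_stripe counter of 3 would be stored in the tree as the
 * pointer value (3 << R5C_RADIX_COUNT_SHIFT) == 12, keeping the low 2 bits
 * clear as the radix tree requires. (Illustrative values only.)
 */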

/*
 * An IO range starts at a meta data block and ends at the next meta data
 * block. The io_unit's meta data block tracks the data/parity that follows
 * it. The io_unit is written to the log disk with a normal write; as we
 * always flush the log disk first and then start moving data to the raid
 * disks, there is no requirement to write the io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
	struct bio *split_bio;

	unsigned int has_flush:1;	/* include flush request */
	unsigned int has_fua:1;		/* include fua request */
	unsigned int has_null_flush:1;	/* include empty flush request */
	/*
	 * io isn't sent yet; a flush/fua request can only be submitted once
	 * it is the first IO in the running_ios list
	 */
	unsigned int io_deferred:1;

	struct bio_list flush_barriers;	/* size == 0 flush bios */
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to log,
				 * not accepting new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripes data finished writing to raid */
};

bool r5c_is_writeback(struct r5l_log *log)
{
	return (log != NULL &&
		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}
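/*
 * For example, on a log of device_size 1000 sectors, the distance from
 * start 900 to end 100 wraps around: 100 + 1000 - 900 = 200 sectors.
 * (Illustrative values; the real device_size is rounded to BLOCK_SECTORS.)
 */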

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

static void
r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
{
	struct bio *wbi, *wbi2;

	wbi = dev->written;
	dev->written = NULL;
	while (wbi && wbi->bi_iter.bi_sector <
	       dev->sector + STRIPE_SECTORS) {
		wbi2 = r5_next_bio(wbi, dev->sector);
		md_write_end(conf->mddev);
		bio_endio(wbi);
		wbi = wbi2;
	}
}

void r5c_handle_cached_data_endio(struct r5conf *conf,
				  struct stripe_head *sh, int disks)
{
	int i;

	for (i = sh->disks; i--; ) {
		if (sh->dev[i].written) {
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS,
					!test_bit(STRIPE_DEGRADED, &sh->state),
					0);
		}
	}
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space);

/* Check whether we should flush some stripes to free up stripe cache */
void r5c_check_stripe_cache_usage(struct r5conf *conf)
{
	int total_cached;

	if (!r5c_is_writeback(conf->log))
		return;

	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes);

	/*
	 * The following condition is true for either of the following:
	 * - stripe cache pressure high:
	 *   total_cached > 3/4 min_nr_stripes ||
	 *   empty_inactive_list_nr > 0
	 * - stripe cache pressure moderate:
	 *   total_cached > 1/2 min_nr_stripes
	 */
	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		r5l_wake_reclaim(conf->log, 0);
}

/*
 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
 * stripes in the cache
 */
void r5c_check_cached_full_stripe(struct r5conf *conf)
{
	if (!r5c_is_writeback(conf->log))
		return;

	/*
	 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
	 * or a full stripe (chunk size / 4k stripes).
	 */
	if (atomic_read(&conf->r5c_cached_full_stripes) >=
	    min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
		conf->chunk_sectors >> STRIPE_SHIFT))
		r5l_wake_reclaim(conf->log, 0);
}

/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes that occupy log space near
 * last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flushes higher priority:
 * 1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *    stripes ALREADY in journal can be flushed w/o pending writes;
 * 2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
 *    can be delayed (r5l_add_no_space_stripe).
 *
 * In a cache flush, the stripe goes through 1 and then 2. For a stripe that
 * has already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For stripes that have not passed 1, flushing
 * requires (conf->raid_disks + 1) pages of journal space. There are at
 * most (conf->group_cnt + 1) stripes that have passed 1. So the total
 * journal space required to flush all cached stripes (in pages) is:
 *
 *	(stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *	(group_cnt + 1) * (raid_disks + 1)
 * or
 *	(stripe_in_journal_count) * (max_degraded + 1) +
 *	(group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	if (!r5c_is_writeback(log))
		return 0;

	return BLOCK_SECTORS *
		((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
		 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}
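/*
 * Worked example (illustrative numbers, not from the source): a 6-disk
 * RAID6 (raid_disks = 6, max_degraded = 2) with group_cnt = 0 and 100
 * stripes in the journal needs
 *	BLOCK_SECTORS * ((2 + 1) * 100 + (6 - 2) * (0 + 1))
 *	= 8 * 304 = 2432 sectors
 * of log space reserved to guarantee the cache can always be flushed.
 */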

/*
 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
 *
 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
 * device is less than 2x of reclaim_required_space.
 */
static inline void r5c_update_log_state(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t free_space;
	sector_t reclaim_space;
	bool wake_reclaim = false;

	if (!r5c_is_writeback(log))
		return;

	free_space = r5l_ring_distance(log, log->log_start,
				       log->last_checkpoint);
	reclaim_space = r5c_log_required_to_flush_cache(conf);
	if (free_space < 2 * reclaim_space)
		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	else {
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
			wake_reclaim = true;
		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	}
	if (free_space < 3 * reclaim_space)
		set_bit(R5C_LOG_TIGHT, &conf->cache_state);
	else
		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);

	if (wake_reclaim)
		r5l_wake_reclaim(log, 0);
}

/*
 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
 * This function should only be called in write-back mode.
 */
void r5c_make_stripe_write_out(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5l_log *log = conf->log;

	BUG_ON(!r5c_is_writeback(log));

	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(STRIPE_R5C_CACHING, &sh->state);

	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_inc(&conf->preread_active_stripes);
}

static void r5c_handle_data_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			set_bit(R5_InJournal, &sh->dev[i].flags);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
		}
	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
}

/*
 * this journal write must contain full parity,
 * it may also contain some data pages
 */
static void r5c_handle_parity_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_bit(R5_InJournal, &sh->dev[i].flags))
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
}

/*
 * Setting proper flags after writing (or flushing) data and/or parity to the
 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
 */
static void r5c_finish_cache_stripe(struct stripe_head *sh)
{
	struct r5l_log *log = sh->raid_conf->log;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
		/*
		 * Set R5_InJournal for parity dev[pd_idx]. This means
		 * all data AND parity in the journal. For RAID 6, it is
		 * NOT necessary to set the flag for dev[qd_idx], as the
		 * two parities are written out together.
		 */
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		r5c_handle_data_cached(sh);
	} else {
		r5c_handle_parity_cached(sh);
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	}
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		r5c_finish_cache_stripe(sh);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_io_unit *io_deferred;
	struct r5l_log *log = io->log;
	unsigned long flags;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
	if (log->need_cache_flush && !list_empty(&io->stripe_list))
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	if (!list_empty(&log->running_ios)) {
		/*
		 * FLUSH/FUA io_unit is deferred because of ordering, now we
		 * can dispatch it
		 */
		io_deferred = list_first_entry(&log->running_ios,
					       struct r5l_io_unit, log_sibling);
		if (io_deferred->io_deferred)
			schedule_work(&log->deferred_io_work);
	}

	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);

	if (io->has_null_flush) {
		struct bio *bi;

		WARN_ON(bio_list_empty(&io->flush_barriers));
		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
			bio_endio(bi);
			atomic_dec(&io->pending_stripe);
		}
	}

	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
	if (atomic_read(&io->pending_stripe) == 0)
		__r5l_stripe_write_finished(io);
}

static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
{
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (io->has_flush)
		io->current_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->current_bio->bi_opf |= REQ_FUA;
	submit_bio(io->current_bio);

	if (!io->split_bio)
		return;

	if (io->has_flush)
		io->split_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->split_bio->bi_opf |= REQ_FUA;
	submit_bio(io->split_bio);
}

/* deferred io_unit will be dispatched here */
static void r5l_submit_io_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   deferred_io_work);
	struct r5l_io_unit *io = NULL;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	if (!list_empty(&log->running_ios)) {
		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
				      log_sibling);
		if (!io->io_deferred)
			io = NULL;
		else
			io->io_deferred = 0;
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (io)
		r5l_do_submit_io(log, io);
}

static void r5c_disable_writeback_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   disable_writeback_work);
	struct mddev *mddev = log->rdev->mddev;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;
	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
		mdname(mddev));
	mddev_suspend(mddev);
	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
	mddev_resume(mddev);
}

static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct bio *bio;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;
	bool do_submit = true;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);
	bio = io->current_bio;

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	if (io->has_flush || io->has_fua) {
		if (io != list_first_entry(&log->running_ios,
					   struct r5l_io_unit, log_sibling)) {
			io->io_deferred = 1;
			do_submit = false;
		}
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (do_submit)
		r5l_do_submit_io(log, io);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);

	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio->bi_bdev = log->rdev->bdev;
	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	r5c_update_log_state(log);
	/*
	 * If we filled up the log device start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = mempool_alloc(log->io_pool, GFP_ATOMIC);
	if (!io)
		return NULL;
	memset(io, 0, sizeof(*io));

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	bio_list_init(&io->flush_barriers);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io) {
		log->current_io = r5l_new_meta(log);
		if (!log->current_io)
			return -ENOMEM;
	}

	return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}
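/*
 * payload->size above is in 512-byte sectors: with 4kB pages
 * (PAGE_SHIFT == 12), one page is 1 << (12 - 9) = 8 sectors, so a data
 * payload records size 8 and a RAID6 parity payload (checksum2_valid,
 * two pages: P and Q) records size 16.
 */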

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		BUG_ON(io->split_bio);
		io->split_bio = io->current_bio;
		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, io->split_bio);
		io->need_split_bio = false;
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}

static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_io_unit *io;
	struct r5l_payload_flush *payload;
	int meta_size;

	/*
	 * payload_flush requires extra writes to the journal.
	 * To avoid handling the extra IO in quiesce, just skip
	 * flush_payload
	 */
	if (conf->quiesce)
		return;

	mutex_lock(&log->io_mutex);
	meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);

	if (r5l_get_meta(log, meta_size)) {
		mutex_unlock(&log->io_mutex);
		return;
	}

	/* current implementation is one stripe per flush payload */
	io = log->current_io;
	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32(sizeof(__le64));
	payload->flush_stripes[0] = cpu_to_le64(sect);
	io->meta_offset += meta_size;
	mutex_unlock(&log->io_mutex);
}

static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			  int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	int ret;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;
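	/*
	 * e.g. for a RAID5 full-stripe write with 4 data pages and 1 parity
	 * page, and assuming a 16-byte struct r5l_payload_data_parity (an
	 * assumption for illustration), meta_size = (16 + 4) * 4 + 16 + 4
	 * = 100 bytes, comfortably below the PAGE_SIZE meta block.
	 */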

	ret = r5l_get_meta(log, meta_size);
	if (ret)
		return ret;

	io = log->current_io;

	if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
		io->has_flush = 1;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
		    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
			io->has_fua = 1;
			/*
			 * we need to flush journal to make sure recovery can
			 * reach the data with fua flag
			 */
			io->has_flush = 1;
		}
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (parity_pages == 2) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else if (parity_pages == 1) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	} else /* Just writing data, not parity, in caching phase */
		BUG_ON(parity_pages != 0);

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return 0;

	if (sh->log_start == MaxSector) {
		BUG_ON(!list_empty(&sh->r5c));
		sh->log_start = io->log_start;
		spin_lock_irq(&log->stripe_in_journal_lock);
		list_add_tail(&sh->r5c,
			      &log->stripe_in_journal_list);
		spin_unlock_irq(&log->stripe_in_journal_lock);
		atomic_inc(&log->stripe_in_journal_count);
	}
	return 0;
}

/* add stripe to no_space_stripes, and then wake up reclaim */
static inline void r5l_add_no_space_stripe(struct r5l_log *log,
					   struct stripe_head *sh)
{
	spin_lock(&log->no_space_stripes_lock);
	list_add_tail(&sh->log_list, &log->no_space_stripes);
	spin_unlock(&log->no_space_stripes_lock);
}

/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int write_disks = 0;
	int data_pages, parity_pages;
	int reserve;
	int i;
	int ret = 0;
	bool wake_reclaim = false;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;

		write_disks++;
		/* checksum is already calculated in last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
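	/*
	 * e.g. a 4-data-disk RAID5 full-stripe write has write_disks == 5
	 * (4 data pages + 1 parity page), so with 4kB pages the reservation
	 * is (1 + 5) * 8 = 48 sectors: one meta block plus 5 pages.
	 * (Illustrative geometry only.)
	 */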

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		if (!r5l_has_free_space(log, reserve)) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	} else {	/* R5C_JOURNAL_MODE_WRITE_BACK */
		/*
		 * log space critical, do not process stripes that are
		 * not in cache yet (sh->log_start == MaxSector).
		 */
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
		    sh->log_start == MaxSector) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
			reserve = 0;
		} else if (!r5l_has_free_space(log, reserve)) {
			if (sh->log_start == log->last_checkpoint)
				BUG();
			else
				r5l_add_no_space_stripe(log, sh);
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	}

	mutex_unlock(&log->io_mutex);
	if (wake_reclaim)
		r5l_wake_reclaim(log, reserve);
	return 0;
}

void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (!log)
		return -ENODEV;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		/*
		 * in write through (journal only)
		 * we flush log disk cache first, then write stripe data to
		 * raid disks. So if bio is finished, the log disk cache is
		 * flushed already. The recovery guarantees we can recover
		 * the bio from the log disk, so we don't need to flush again.
		 */
		if (bio->bi_iter.bi_size == 0) {
			bio_endio(bio);
			return 0;
		}
		bio->bi_opf &= ~REQ_PREFLUSH;
	} else {
		/* write back (with cache) */
		if (bio->bi_iter.bi_size == 0) {
			mutex_lock(&log->io_mutex);
			r5l_get_meta(log, 0);
			bio_list_add(&log->current_io->flush_barriers, bio);
			log->current_io->has_flush = 1;
			log->current_io->has_null_flush = 1;
			atomic_inc(&log->current_io->pending_stripe);
			r5l_submit_current_io(log);
			mutex_unlock(&log->io_mutex);
			return 0;
		}
	}
	return -EAGAIN;
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}

/*
 * calculate new last_checkpoint
 * for write through mode, returns log->next_checkpoint
 * for write back, returns log_start of first sh in stripe_in_journal_list
 */
static sector_t r5c_calculate_new_cp(struct r5conf *conf)
{
	struct stripe_head *sh;
	struct r5l_log *log = conf->log;
	sector_t new_cp;
	unsigned long flags;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return log->next_checkpoint;

	spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
	if (list_empty(&conf->log->stripe_in_journal_list)) {
		/* all stripes flushed */
		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
		return log->next_checkpoint;
	}
	sh = list_first_entry(&conf->log->stripe_in_journal_list,
			      struct stripe_head, r5c);
	new_cp = sh->log_start;
	spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
	return new_cp;
}

static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;

	return r5l_ring_distance(log, log->last_checkpoint,
				 r5c_calculate_new_cp(conf));
}

static void r5l_run_no_mem_stripe(struct r5l_log *log)
{
	struct stripe_head *sh;

	assert_spin_locked(&log->io_list_lock);

	if (!list_empty(&log->no_mem_stripes)) {
		sh = list_first_entry(&log->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static bool r5l_complete_finished_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;
	bool found = false;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_STRIPE_END)
			break;

		log->next_checkpoint = io->log_start;

		list_del(&io->log_sibling);
		mempool_free(io, log->io_pool);
		r5l_run_no_mem_stripe(log);

		found = true;
	}

	return found;
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	struct r5conf *conf = log->rdev->mddev->private;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

	if (!r5l_complete_finished_ios(log)) {
		spin_unlock_irqrestore(&log->io_list_lock, flags);
		return;
	}

	if (r5l_reclaimable_space(log) > log->max_free_space ||
	    test_bit(R5C_LOG_TIGHT, &conf->cache_state))
		r5l_wake_reclaim(log, 0);

	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}

static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
					   flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling)
		r5l_io_run_stripes(io);
	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Starting to dispatch IO to raid.
 * The log consists of io_units, each headed by a meta block. There is one
 * situation we want to avoid: a broken meta in the middle of the log keeps
 * recovery from finding the meta at the head of the log. If an operation
 * requires the meta at the head to be persistent in the log, we must make
 * sure the meta before it is persistent in the log too. A case is:
 *
 * stripe data/parity is in the log and we start writing the stripe to raid
 * disks. The stripe data/parity must be persistent in the log before we do
 * the write to the raid disks.
 *
 * The solution is to strictly maintain io_unit list order. In this case, we
 * only write stripes of an io_unit to raid disks till the io_unit is the
 * first one whose data/parity is in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;

	if (!log || !log->need_cache_flush)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_reset(&log->flush_bio);
	log->flush_bio.bi_bdev = log->rdev->bdev;
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	submit_bio(&log->flush_bio);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_write_super_and_discard_space(struct r5l_log *log,
	sector_t end)
{
	struct block_device *bdev = log->rdev->bdev;
	struct mddev *mddev;

	r5l_write_super(log, end);

	if (!blk_queue_discard(bdev_get_queue(bdev)))
		return;

	mddev = log->rdev->mddev;
	/*
	 * Discard could zero data, so before discard we must make sure
	 * superblock is updated to new log tail. Updating superblock (either
	 * directly call md_update_sb() or depend on md thread) must hold
	 * reconfig mutex. On the other hand, raid5_quiesce is called with
	 * reconfig_mutex held. The first step of raid5_quiesce() is waiting
	 * for all IO to finish, hence waiting for the reclaim thread, while
	 * the reclaim thread is calling this function and waiting for the
	 * reconfig mutex. So there is a deadlock. We work around this issue
	 * with a trylock.
	 * FIXME: we could miss discard if we can't take reconfig mutex
	 */
	set_mask_bits(&mddev->sb_flags, 0,
		BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	if (!mddev_trylock(mddev))
		return;
	md_update_sb(mddev, 1);
	mddev_unlock(mddev);

	/* discard IO error really doesn't matter, ignore it */
	if (log->last_checkpoint < end) {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				end - log->last_checkpoint, GFP_NOIO, 0);
	} else {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				log->device_size - log->last_checkpoint,
				GFP_NOIO, 0);
		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
				GFP_NOIO, 0);
	}
}

/*
 * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
 * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
 *
 * must hold conf->device_lock
 */
static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	BUG_ON(list_empty(&sh->lru));
	BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));

	/*
	 * The stripe is not ON_RELEASE_LIST, so it is safe to call
	 * raid5_release_stripe() while holding conf->device_lock
	 */
	BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
	assert_spin_locked(&conf->device_lock);

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);

	set_bit(STRIPE_HANDLE, &sh->state);
	atomic_inc(&conf->active_stripes);
	r5c_make_stripe_write_out(sh);

	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
		atomic_inc(&conf->r5c_flushing_partial_stripes);
	else
		atomic_inc(&conf->r5c_flushing_full_stripes);
	raid5_release_stripe(sh);
}
1372
/*
 * if num == 0, flush all full stripes
 * if num > 0, flush all full stripes. If fewer than num full stripes are
 * flushed, flush some partial stripes until a total of num stripes are
 * flushed or there are no more cached stripes.
 */
void r5c_flush_cache(struct r5conf *conf, int num)
{
	int count;
	struct stripe_head *sh, *next;

	assert_spin_locked(&conf->device_lock);
	if (!conf->log)
		return;

	count = 0;
	list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
		r5c_flush_stripe(conf, sh);
		count++;
	}

	if (count >= num)
		return;
	list_for_each_entry_safe(sh, next,
				 &conf->r5c_partial_stripe_list, lru) {
		r5c_flush_stripe(conf, sh);
		if (++count >= num)
			break;
	}
}
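
/*
 * For example: r5c_flush_cache(conf, 0) flushes only the full stripes (the
 * count >= num check is true as soon as the first loop completes), while
 * r5c_flush_cache(conf, R5C_RECLAIM_STRIPE_GROUP) flushes all full stripes
 * and then enough partial stripes to reach the target count.
 */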

static void r5c_do_reclaim(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;
	struct stripe_head *sh;
	int count = 0;
	unsigned long flags;
	int total_cached;
	int stripes_to_flush;
	int flushing_partial, flushing_full;

	if (!r5c_is_writeback(log))
		return;

	flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
	flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes) -
		flushing_full - flushing_partial;

	if (total_cached > conf->min_nr_stripes * 3 / 4 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		/*
		 * if stripe cache pressure is high, flush all full stripes
		 * and some partial stripes
		 */
		stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
	else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
		 atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
		 R5C_FULL_STRIPE_FLUSH_BATCH(conf))
		/*
		 * if stripe cache pressure is moderate, or if there are many
		 * full stripes, flush all full stripes
		 */
		stripes_to_flush = 0;
	else
		/* no need to flush */
		stripes_to_flush = -1;

	if (stripes_to_flush >= 0) {
		spin_lock_irqsave(&conf->device_lock, flags);
		r5c_flush_cache(conf, stripes_to_flush);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	/* if log space is tight, flush stripes on stripe_in_journal_list */
	if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
		spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
		spin_lock(&conf->device_lock);
		list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
			/*
			 * stripes on stripe_in_journal_list could be in any
			 * state of the stripe_cache state machine. In this
			 * case, we only want to flush stripes on
			 * r5c_cached_full/partial_stripes. The following
			 * condition makes sure the stripe is on one of the
			 * two lists.
			 */
			if (!list_empty(&sh->lru) &&
			    !test_bit(STRIPE_HANDLE, &sh->state) &&
			    atomic_read(&sh->count) == 0) {
				r5c_flush_stripe(conf, sh);
				if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
					break;
			}
		}
		spin_unlock(&conf->device_lock);
		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
	}

	if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
		r5l_run_no_space_stripes(log);

	md_wakeup_thread(conf->mddev->thread);
}

static void r5l_do_reclaim(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
	sector_t reclaimable;
	sector_t next_checkpoint;
	bool write_super;

	spin_lock_irq(&log->io_list_lock);
	write_super = r5l_reclaimable_space(log) > log->max_free_space ||
		reclaim_target != 0 || !list_empty(&log->no_space_stripes);
	/*
	 * move proper io_units to the reclaim list. We should not change the
	 * order: reclaimable and unreclaimable io_units can be mixed in the
	 * list, and we shouldn't reuse the space of an unreclaimable io_unit
	 */
	while (1) {
		reclaimable = r5l_reclaimable_space(log);
		if (reclaimable >= reclaim_target ||
		    (list_empty(&log->running_ios) &&
		     list_empty(&log->io_end_ios) &&
		     list_empty(&log->flushing_ios) &&
		     list_empty(&log->finished_ios)))
			break;

		md_wakeup_thread(log->rdev->mddev->thread);
		wait_event_lock_irq(log->iounit_wait,
				    r5l_reclaimable_space(log) > reclaimable,
				    log->io_list_lock);
	}

	next_checkpoint = r5c_calculate_new_cp(conf);
	spin_unlock_irq(&log->io_list_lock);

	if (reclaimable == 0 || !write_super)
		return;

	/*
	 * write_super will flush the cache of each raid disk. We must write
	 * super here, because the log area might be reused soon and we don't
	 * want to confuse recovery
	 */
	r5l_write_super_and_discard_space(log, next_checkpoint);

	mutex_lock(&log->io_mutex);
	log->last_checkpoint = next_checkpoint;
	r5c_update_log_state(log);
	mutex_unlock(&log->io_mutex);

	r5l_run_no_space_stripes(log);
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;
	r5c_do_reclaim(conf);
	r5l_do_reclaim(log);
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
	unsigned long target;
	unsigned long new = (unsigned long)space; /* overflow in theory */

	if (!log)
		return;
	do {
		target = log->reclaim_target;
		if (new < target)
			return;
	} while (cmpxchg(&log->reclaim_target, target, new) != target);
	md_wakeup_thread(log->reclaim_thread);
}
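
/*
 * The cmpxchg() loop above is a lock-free "monotonic max": e.g. if two
 * callers race with space == 64 and space == 128, reclaim_target ends up
 * as 128 regardless of ordering, because a caller whose value is below
 * the current target simply returns without storing.
 */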

void r5l_quiesce(struct r5l_log *log, int state)
{
	struct mddev *mddev;
	if (!log || state == 2)
		return;
	if (state == 0)
		kthread_unpark(log->reclaim_thread->tsk);
	else if (state == 1) {
		/* make sure r5l_write_super_and_discard_space exits */
		mddev = log->rdev->mddev;
		wake_up(&mddev->sb_wait);
		kthread_park(log->reclaim_thread->tsk);
		r5l_wake_reclaim(log, MaxSector);
		r5l_do_reclaim(log);
	}
}

bool r5l_log_disk_error(struct r5conf *conf)
{
	struct r5l_log *log;
	bool ret;
	/* don't allow write if journal disk is missing */
	rcu_read_lock();
	log = rcu_dereference(conf->log);

	if (!log)
		ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	else
		ret = test_bit(Faulty, &log->rdev->flags);
	rcu_read_unlock();
	return ret;
}

#define R5L_RECOVERY_PAGE_POOL_SIZE 256

struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
	int data_parity_stripes;	/* number of data_parity stripes */
	int data_only_stripes;		/* number of data_only stripes */
	struct list_head cached_list;

	/*
	 * read ahead page pool (ra_pool)
	 * In recovery, the log is read sequentially. It is not efficient to
	 * read every page with sync_page_io(). The read ahead page pool
	 * reads multiple pages with one IO, so further log reads can
	 * just copy data from the pool.
	 */
	struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
	sector_t pool_offset;	/* offset of first page in the pool */
	int total_pages;	/* total allocated pages */
	int valid_pages;	/* pages with valid data */
	struct bio *ra_bio;	/* bio to do the read ahead */
};

static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx)
{
	struct page *page;

	ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, log->bs);
	if (!ctx->ra_bio)
		return -ENOMEM;

	ctx->valid_pages = 0;
	ctx->total_pages = 0;
	while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
		page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		ctx->ra_pool[ctx->total_pages] = page;
		ctx->total_pages += 1;
	}

	if (ctx->total_pages == 0) {
		bio_put(ctx->ra_bio);
		return -ENOMEM;
	}

	ctx->pool_offset = 0;
	return 0;
}

static void r5l_recovery_free_ra_pool(struct r5l_log *log,
				      struct r5l_recovery_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->total_pages; ++i)
		put_page(ctx->ra_pool[i]);
	bio_put(ctx->ra_bio);
}

/*
 * fetch ctx->valid_pages pages from offset
 * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
 * However, if the offset is close to the end of the journal device,
 * ctx->valid_pages could be smaller than ctx->total_pages
 */
static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
				      struct r5l_recovery_ctx *ctx,
				      sector_t offset)
{
	bio_reset(ctx->ra_bio);
	ctx->ra_bio->bi_bdev = log->rdev->bdev;
	bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
	ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;

	ctx->valid_pages = 0;
	ctx->pool_offset = offset;

	while (ctx->valid_pages < ctx->total_pages) {
		bio_add_page(ctx->ra_bio,
			     ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
		ctx->valid_pages += 1;

		offset = r5l_ring_add(log, offset, BLOCK_SECTORS);

		if (offset == 0)  /* reached end of the device */
			break;
	}

	return submit_bio_wait(ctx->ra_bio);
}

/*
 * try to read a page from the read ahead page pool; if the page is not in
 * the pool, call r5l_recovery_fetch_ra_pool
 */
static int r5l_recovery_read_page(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx,
				  struct page *page,
				  sector_t offset)
{
	int ret;

	if (offset < ctx->pool_offset ||
	    offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
		ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
		if (ret)
			return ret;
	}

	BUG_ON(offset < ctx->pool_offset ||
	       offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);

	memcpy(page_address(page),
	       page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
					 BLOCK_SECTOR_SHIFT]),
	       PAGE_SIZE);
	return 0;
}
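
/*
 * Worked example for the pool indexing above (hypothetical numbers): with
 * pool_offset == 1024 and BLOCK_SECTOR_SHIFT == 3 (8 sectors per 4k block),
 * a read at offset 1048 copies from ra_pool[(1048 - 1024) >> 3], i.e.
 * ra_pool[3].
 */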

static int r5l_recovery_read_meta_block(struct r5l_log *log,
					struct r5l_recovery_ctx *ctx)
{
	struct page *page = ctx->meta_page;
	struct r5l_meta_block *mb;
	u32 crc, stored_crc;
	int ret;

	ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
	if (ret != 0)
		return ret;

	mb = page_address(page);
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    le64_to_cpu(mb->seq) != ctx->seq ||
	    mb->version != R5LOG_VERSION ||
	    le64_to_cpu(mb->position) != ctx->pos)
		return -EINVAL;

	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != crc)
		return -EINVAL;

	if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
		return -EINVAL;

	ctx->meta_total_blocks = BLOCK_SECTORS;

	return 0;
}

static void
r5l_recovery_create_empty_meta_block(struct r5l_log *log,
				     struct page *page,
				     sector_t pos, u64 seq)
{
	struct r5l_meta_block *mb;

	mb = page_address(page);
	clear_page(mb);
	mb->magic = cpu_to_le32(R5LOG_MAGIC);
	mb->version = R5LOG_VERSION;
	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
	mb->seq = cpu_to_le64(seq);
	mb->position = cpu_to_le64(pos);
}

static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
					  u64 seq)
{
	struct page *page;
	struct r5l_meta_block *mb;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	r5l_recovery_create_empty_meta_block(log, page, pos, seq);
	mb = page_address(page);
	mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
					     mb, PAGE_SIZE));
	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
			  REQ_FUA, false)) {
		__free_page(page);
		return -EIO;
	}
	__free_page(page);
	return 0;
}

/*
 * r5l_recovery_load_data and r5l_recovery_load_parity use the flag
 * R5_Wantwrite to mark valid (potentially not flushed) data in the journal.
 *
 * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
 * so there should not be any mismatch here.
 */
static void r5l_recovery_load_data(struct r5l_log *log,
				   struct stripe_head *sh,
				   struct r5l_recovery_ctx *ctx,
				   struct r5l_payload_data_parity *payload,
				   sector_t log_offset)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	int dd_idx;

	raid5_compute_sector(conf,
			     le64_to_cpu(payload->location), 0,
			     &dd_idx, sh);
	r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
	sh->dev[dd_idx].log_checksum =
		le32_to_cpu(payload->checksum[0]);
	ctx->meta_total_blocks += BLOCK_SECTORS;

	set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
	set_bit(STRIPE_R5C_CACHING, &sh->state);
}

static void r5l_recovery_load_parity(struct r5l_log *log,
				     struct stripe_head *sh,
				     struct r5l_recovery_ctx *ctx,
				     struct r5l_payload_data_parity *payload,
				     sector_t log_offset)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;

	ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
	r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
	sh->dev[sh->pd_idx].log_checksum =
		le32_to_cpu(payload->checksum[0]);
	set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);

	if (sh->qd_idx >= 0) {
		r5l_recovery_read_page(
			log, ctx, sh->dev[sh->qd_idx].page,
			r5l_ring_add(log, log_offset, BLOCK_SECTORS));
		sh->dev[sh->qd_idx].log_checksum =
			le32_to_cpu(payload->checksum[1]);
		set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
	}
	clear_bit(STRIPE_R5C_CACHING, &sh->state);
}

static void r5l_recovery_reset_stripe(struct stripe_head *sh)
{
	int i;

	sh->state = 0;
	sh->log_start = MaxSector;
	for (i = sh->disks; i--; )
		sh->dev[i].flags = 0;
}

static void
r5l_recovery_replay_one_stripe(struct r5conf *conf,
			       struct stripe_head *sh,
			       struct r5l_recovery_ctx *ctx)
{
	struct md_rdev *rdev, *rrdev;
	int disk_index;
	int data_count = 0;

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;
		if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
			continue;
		data_count++;
	}

	/*
	 * stripes that only have parity must have been flushed
	 * before the crash that we are now recovering from, so
	 * there is nothing more to recover.
	 */
	if (data_count == 0)
		goto out;

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;

		/* in case device is broken */
		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[disk_index].rdev);
		if (rdev) {
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			sync_page_io(rdev, sh->sector, PAGE_SIZE,
				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
				     false);
			rdev_dec_pending(rdev, rdev->mddev);
			rcu_read_lock();
		}
		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
		if (rrdev) {
			atomic_inc(&rrdev->nr_pending);
			rcu_read_unlock();
			sync_page_io(rrdev, sh->sector, PAGE_SIZE,
				     sh->dev[disk_index].page, REQ_OP_WRITE, 0,
				     false);
			rdev_dec_pending(rrdev, rrdev->mddev);
			rcu_read_lock();
		}
		rcu_read_unlock();
	}
	ctx->data_parity_stripes++;
out:
	r5l_recovery_reset_stripe(sh);
}

static struct stripe_head *
r5c_recovery_alloc_stripe(struct r5conf *conf,
			  sector_t stripe_sect)
{
	struct stripe_head *sh;

	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
	if (!sh)
		return NULL;  /* no more stripe available */

	r5l_recovery_reset_stripe(sh);

	return sh;
}

static struct stripe_head *
r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
{
	struct stripe_head *sh;

	list_for_each_entry(sh, list, lru)
		if (sh->sector == sect)
			return sh;
	return NULL;
}

static void
r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
			  struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
		r5l_recovery_reset_stripe(sh);
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}
}

static void
r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
			    struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
		if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
			r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
			list_del_init(&sh->lru);
			raid5_release_stripe(sh);
		}
}

/* if matches return 0; otherwise return -EINVAL */
static int
r5l_recovery_verify_data_checksum(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx,
				  struct page *page,
				  sector_t log_offset, __le32 log_checksum)
{
	void *addr;
	u32 checksum;

	r5l_recovery_read_page(log, ctx, page, log_offset);
	addr = kmap_atomic(page);
	checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
	kunmap_atomic(addr);
	return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
}

/*
 * before loading data to stripe cache, we need to verify the checksum for
 * all data; if there is a mismatch for any data page, we drop all data in
 * the meta block
 */
static int
r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_meta_block *mb = page_address(ctx->meta_page);
	sector_t mb_offset = sizeof(struct r5l_meta_block);
	sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
	struct page *page;
	struct r5l_payload_data_parity *payload;
	struct r5l_payload_flush *payload_flush;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	while (mb_offset < le32_to_cpu(mb->meta_size)) {
		payload = (void *)mb + mb_offset;
		payload_flush = (void *)mb + mb_offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			if (r5l_recovery_verify_data_checksum(
				    log, ctx, page, log_offset,
				    payload->checksum[0]) < 0)
				goto mismatch;
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) {
			if (r5l_recovery_verify_data_checksum(
				    log, ctx, page, log_offset,
				    payload->checksum[0]) < 0)
				goto mismatch;
			if (conf->max_degraded == 2 && /* q for RAID 6 */
			    r5l_recovery_verify_data_checksum(
				    log, ctx, page,
				    r5l_ring_add(log, log_offset,
						 BLOCK_SECTORS),
				    payload->checksum[1]) < 0)
				goto mismatch;
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			/* nothing to do for R5LOG_PAYLOAD_FLUSH here */
		} else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */
			goto mismatch;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			mb_offset += sizeof(struct r5l_payload_flush) +
				le32_to_cpu(payload_flush->size);
		} else {
			/* DATA or PARITY payload */
			log_offset = r5l_ring_add(log, log_offset,
						  le32_to_cpu(payload->size));
			mb_offset += sizeof(struct r5l_payload_data_parity) +
				sizeof(__le32) *
				(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
		}
	}

	put_page(page);
	return 0;

mismatch:
	put_page(page);
	return -EINVAL;
}

/*
 * Analyze all data/parity pages in one meta block
 * Returns:
 * 0 for success
 * -EINVAL for unknown payload type
 * -EAGAIN for checksum mismatch of data page
 * -ENOMEM for running out of memory (alloc_page failed or ran out
 *  of stripes)
 */
static int
r5c_recovery_analyze_meta_block(struct r5l_log *log,
				struct r5l_recovery_ctx *ctx,
				struct list_head *cached_stripe_list)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_meta_block *mb;
	struct r5l_payload_data_parity *payload;
	struct r5l_payload_flush *payload_flush;
	int mb_offset;
	sector_t log_offset;
	sector_t stripe_sect;
	struct stripe_head *sh;
	int ret;

	/*
	 * for a mismatch in data blocks, we will drop all data in this mb,
	 * but we will still read the next mb for other data with the FLUSH
	 * flag, as io_units could finish out of order.
	 */
	ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
	if (ret == -EINVAL)
		return -EAGAIN;
	else if (ret)
		return ret;	/* -ENOMEM due to alloc_page() failure */

	mb = page_address(ctx->meta_page);
	mb_offset = sizeof(struct r5l_meta_block);
	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

	while (mb_offset < le32_to_cpu(mb->meta_size)) {
		int dd;

		payload = (void *)mb + mb_offset;
		payload_flush = (void *)mb + mb_offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) {
			int i, count;

			count = le32_to_cpu(payload_flush->size) / sizeof(__le64);
			for (i = 0; i < count; ++i) {
				stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]);
				sh = r5c_recovery_lookup_stripe(cached_stripe_list,
								stripe_sect);
				if (sh) {
					WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
					r5l_recovery_reset_stripe(sh);
					list_del_init(&sh->lru);
					raid5_release_stripe(sh);
				}
			}

			mb_offset += sizeof(struct r5l_payload_flush) +
				le32_to_cpu(payload_flush->size);
			continue;
		}

		/* DATA or PARITY payload */
		stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ?
			raid5_compute_sector(
				conf, le64_to_cpu(payload->location), 0, &dd,
				NULL)
			: le64_to_cpu(payload->location);

		sh = r5c_recovery_lookup_stripe(cached_stripe_list,
						stripe_sect);

		if (!sh) {
			sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
			/*
			 * cannot get stripe from raid5_get_active_stripe;
			 * try to replay some stripes
			 */
			if (!sh) {
				r5c_recovery_replay_stripes(
					cached_stripe_list, ctx);
				sh = r5c_recovery_alloc_stripe(
					conf, stripe_sect);
			}
			if (!sh) {
				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
					 mdname(mddev),
					 conf->min_nr_stripes * 2);
				raid5_set_cache_size(mddev,
						     conf->min_nr_stripes * 2);
				sh = r5c_recovery_alloc_stripe(conf,
							       stripe_sect);
			}
			if (!sh) {
				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
				       mdname(mddev));
				return -ENOMEM;
			}
			list_add_tail(&sh->lru, cached_stripe_list);
		}

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
			    test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
				r5l_recovery_replay_one_stripe(conf, sh, ctx);
				list_move_tail(&sh->lru, cached_stripe_list);
			}
			r5l_recovery_load_data(log, sh, ctx, payload,
					       log_offset);
		} else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
			r5l_recovery_load_parity(log, sh, ctx, payload,
						 log_offset);
		else
			return -EINVAL;

		log_offset = r5l_ring_add(log, log_offset,
					  le32_to_cpu(payload->size));

		mb_offset += sizeof(struct r5l_payload_data_parity) +
			sizeof(__le32) *
			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
	}

	return 0;
}

/*
 * Load the stripe into cache. The stripe will be written out later by
 * the stripe cache state machine.
 */
static void r5c_recovery_load_one_stripe(struct r5l_log *log,
					 struct stripe_head *sh)
{
	struct r5dev *dev;
	int i;

	for (i = sh->disks; i--; ) {
		dev = sh->dev + i;
		if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
			set_bit(R5_InJournal, &dev->flags);
			set_bit(R5_UPTODATE, &dev->flags);
		}
	}
}

/*
 * Scan through the log for all to-be-flushed data
 *
 * For stripes with data and parity, namely Data-Parity stripe
 * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
 *
 * For stripes with only data, namely Data-Only stripe
 * (STRIPE_R5C_CACHING == 1), we load them to the stripe cache state machine.
 *
 * For a stripe, if we see data after parity, we should discard all previous
 * data and parity for this stripe, as these data are already flushed to
 * the array.
 *
 * At the end of the scan, we return the new journal_tail, which points to
 * the first data-only stripe on the journal device, or the next invalid
 * meta block.
 */
static int r5c_recovery_flush_log(struct r5l_log *log,
				  struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh;
	int ret = 0;

	/* scan through the log */
	while (1) {
		if (r5l_recovery_read_meta_block(log, ctx))
			break;

		ret = r5c_recovery_analyze_meta_block(log, ctx,
						      &ctx->cached_list);
		/*
		 * -EAGAIN means mismatch in data block; in this case, we
		 * still try to scan the next meta block
		 */
		if (ret && ret != -EAGAIN)
			break;	/* ret == -EINVAL or -ENOMEM */
		ctx->seq++;
		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
	}

	if (ret == -ENOMEM) {
		r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
		return ret;
	}

	/* replay data-parity stripes */
	r5c_recovery_replay_stripes(&ctx->cached_list, ctx);

	/* load data-only stripes to stripe cache */
	list_for_each_entry(sh, &ctx->cached_list, lru) {
		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
		r5c_recovery_load_one_stripe(log, sh);
		ctx->data_only_stripes++;
	}

	return 0;
}

/*
 * we did a recovery. Now ctx.pos points to an invalid meta block. The new
 * log will start here. But we can't let the superblock point to the last
 * valid meta block. The log might look like:
 * | meta 1| meta 2| meta 3|
 * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
 * superblock points to meta 1, we write a new valid meta 2n. If a crash
 * happens again, the new recovery will start from meta 1. Since meta 2n
 * is valid now, recovery will think meta 3 is valid, which is wrong.
 * The solution is to create a new meta in meta 2 with its seq == meta
 * 1's seq + 10000 and let the superblock point to meta 2. Then recovery
 * will not think meta 3 is a valid meta, because its seq doesn't match
 */
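
/*
 * Worked example (hypothetical numbers): if meta 1 carries seq 42, the scan
 * leaves ctx->seq == 43 at meta 2's position; after the += 10000 in
 * r5l_recovery_log(), the block rewritten there carries seq 10043. On the
 * next recovery, a stale meta 3 with seq 44 fails the seq check in
 * r5l_recovery_read_meta_block(), which expects 10044.
 */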

/*
 * Before recovery, the log looks like the following
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^
 *   |- log->last_checkpoint
 *   |- log->last_cp_seq
 *
 * Now we scan through the log until we see invalid entry
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^                            ^
 *   |- log->last_checkpoint      |- ctx->pos
 *   |- log->last_cp_seq          |- ctx->seq
 *
 * From this point, we need to increase seq number by 10000 to avoid
 * confusing next recovery.
 *
 *   ---------------------------------------------
 *   |           valid log        | invalid log  |
 *   ---------------------------------------------
 *   ^                            ^
 *   |- log->last_checkpoint      |- ctx->pos+1
 *   |- log->last_cp_seq          |- ctx->seq+10001
 *
 * However, it is not safe to start the state machine yet, because data only
 * parities are not yet secured in RAID. To save these data only parities, we
 * rewrite them from seq+10001.
 *
 *   -----------------------------------------------------------------
 *   |           valid log        | data only stripes | invalid log  |
 *   -----------------------------------------------------------------
 *   ^                                                ^
 *   |- log->last_checkpoint                          |- ctx->pos+n
 *   |- log->last_cp_seq                              |- ctx->seq+10000+n
 *
 * If failure happens again during this process, the recovery can safely
 * start again from log->last_checkpoint.
 *
 * Once data only stripes are rewritten to journal, we move log_tail
 *
 *   -----------------------------------------------------------------
 *   |       old log        |    data only stripes    | invalid log  |
 *   -----------------------------------------------------------------
 *   ^                                                ^
 *   |- log->last_checkpoint                          |- ctx->pos+n
 *   |- log->last_cp_seq                              |- ctx->seq+10000+n
 *
 * Then we can safely start the state machine. If failure happens from this
 * point on, the recovery will start from new log->last_checkpoint.
 */
static int
r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct stripe_head *sh;
	struct mddev *mddev = log->rdev->mddev;
	struct page *page;
	sector_t next_checkpoint = MaxSector;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
		       mdname(mddev));
		return -ENOMEM;
	}

	WARN_ON(list_empty(&ctx->cached_list));

	list_for_each_entry(sh, &ctx->cached_list, lru) {
		struct r5l_meta_block *mb;
		int i;
		int offset;
		sector_t write_pos;

		WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
		r5l_recovery_create_empty_meta_block(log, page,
						     ctx->pos, ctx->seq);
		mb = page_address(page);
		offset = le32_to_cpu(mb->meta_size);
		write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

		for (i = sh->disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			struct r5l_payload_data_parity *payload;
			void *addr;

			if (test_bit(R5_InJournal, &dev->flags)) {
				payload = (void *)mb + offset;
				payload->header.type = cpu_to_le16(
					R5LOG_PAYLOAD_DATA);
				payload->size = cpu_to_le32(BLOCK_SECTORS);
				payload->location = cpu_to_le64(
					raid5_compute_blocknr(sh, i, 0));
				addr = kmap_atomic(dev->page);
				payload->checksum[0] = cpu_to_le32(
					crc32c_le(log->uuid_checksum, addr,
						  PAGE_SIZE));
				kunmap_atomic(addr);
				sync_page_io(log->rdev, write_pos, PAGE_SIZE,
					     dev->page, REQ_OP_WRITE, 0, false);
				write_pos = r5l_ring_add(log, write_pos,
							 BLOCK_SECTORS);
				offset += sizeof(__le32) +
					sizeof(struct r5l_payload_data_parity);
			}
		}
		mb->meta_size = cpu_to_le32(offset);
		mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
						     mb, PAGE_SIZE));
		sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
			     REQ_OP_WRITE, REQ_FUA, false);
		sh->log_start = ctx->pos;
		list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
		atomic_inc(&log->stripe_in_journal_count);
		ctx->pos = write_pos;
		ctx->seq += 1;
		next_checkpoint = sh->log_start;
	}
	log->next_checkpoint = next_checkpoint;
	__free_page(page);
	return 0;
}

static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
						 struct r5l_recovery_ctx *ctx)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct stripe_head *sh, *next;

	if (ctx->data_only_stripes == 0)
		return;

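	/*
	 * Temporarily switch to write-back mode so the cached data-only
	 * stripes go through the normal write-back path of the state
	 * machine; write-through mode is restored below once they have
	 * all reached the RAID disks.
	 */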
	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;

	list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
		r5c_make_stripe_write_out(sh);
		set_bit(STRIPE_HANDLE, &sh->state);
		list_del_init(&sh->lru);
		raid5_release_stripe(sh);
	}

	md_wakeup_thread(conf->mddev->thread);
	/* reuse conf->wait_for_quiescent in recovery */
	wait_event(conf->wait_for_quiescent,
		   atomic_read(&conf->active_stripes) == 0);

	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
}

static int r5l_recovery_log(struct r5l_log *log)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5l_recovery_ctx *ctx;
	int ret;
	sector_t pos;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->pos = log->last_checkpoint;
	ctx->seq = log->last_cp_seq;
	INIT_LIST_HEAD(&ctx->cached_list);
	ctx->meta_page = alloc_page(GFP_KERNEL);

	if (!ctx->meta_page) {
		ret = -ENOMEM;
		goto meta_page;
	}

	if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
		ret = -ENOMEM;
		goto ra_pool;
	}

	ret = r5c_recovery_flush_log(log, ctx);

	if (ret)
		goto error;

	pos = ctx->pos;
	ctx->seq += 10000;

	if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
		pr_debug("md/raid:%s: starting from clean shutdown\n",
			 mdname(mddev));
	else
		pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
			 mdname(mddev), ctx->data_only_stripes,
			 ctx->data_parity_stripes);

	if (ctx->data_only_stripes == 0) {
		log->next_checkpoint = ctx->pos;
		r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
		ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
	} else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
		pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
		       mdname(mddev));
		ret = -EIO;
		goto error;
	}

	log->log_start = ctx->pos;
	log->seq = ctx->seq;
	log->last_checkpoint = pos;
	r5l_write_super(log, pos);

	r5c_recovery_flush_data_only_stripes(log, ctx);
	ret = 0;
error:
	r5l_recovery_free_ra_pool(log, ctx);
ra_pool:
	__free_page(ctx->meta_page);
meta_page:
	kfree(ctx);
	return ret;
}

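/*
 * Record the new log tail in the rdev and mark the superblock dirty. Note
 * that this only queues the update: the actual superblock write is left to
 * the md thread, or to an explicit md_update_sb() such as the one in
 * r5l_write_super_and_discard_space().
 */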
static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}

static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
{
	struct r5conf *conf = mddev->private;
	int ret;

	if (!conf->log)
		return 0;

	switch (conf->log->r5c_journal_mode) {
	case R5C_JOURNAL_MODE_WRITE_THROUGH:
		ret = snprintf(
			page, PAGE_SIZE, "[%s] %s\n",
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
		break;
	case R5C_JOURNAL_MODE_WRITE_BACK:
		ret = snprintf(
			page, PAGE_SIZE, "%s [%s]\n",
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
			r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
		break;
	default:
		ret = 0;
	}
	return ret;
}

static ssize_t r5c_journal_mode_store(struct mddev *mddev,
				      const char *page, size_t length)
{
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;
	int val = -1, i;
	int len = length;

	if (!log)
		return -ENODEV;

	if (len && page[len - 1] == '\n')
		len -= 1;
	for (i = 0; i < ARRAY_SIZE(r5c_journal_mode_str); i++)
		if (strlen(r5c_journal_mode_str[i]) == len &&
		    strncmp(page, r5c_journal_mode_str[i], len) == 0) {
			val = i;
			break;
		}
	if (val < R5C_JOURNAL_MODE_WRITE_THROUGH ||
	    val > R5C_JOURNAL_MODE_WRITE_BACK)
		return -EINVAL;

	if (raid5_calc_degraded(conf) > 0 &&
	    val == R5C_JOURNAL_MODE_WRITE_BACK)
		return -EINVAL;

	mddev_suspend(mddev);
	conf->log->r5c_journal_mode = val;
	mddev_resume(mddev);

	pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
		 mdname(mddev), val, r5c_journal_mode_str[val]);
	return length;
}

struct md_sysfs_entry
r5c_journal_mode = __ATTR(journal_mode, 0644,
			  r5c_journal_mode_show, r5c_journal_mode_store);
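
/*
 * From userspace the mode is toggled through this sysfs attribute, e.g.
 * (assuming the array is md0):
 *
 *   $ cat /sys/block/md0/md/journal_mode
 *   [write-through] write-back
 *   $ echo write-back > /sys/block/md0/md/journal_mode
 *
 * The store handler above rejects write-back while the array is degraded.
 */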

/*
 * Try to handle a write operation in caching phase. This function should
 * only be called in write-back mode.
 *
 * If all outstanding writes can be handled in caching phase, returns 0
 * If writes require write-out phase, call r5c_make_stripe_write_out()
 * and returns -EAGAIN
 */
int r5c_try_caching_write(struct r5conf *conf,
			  struct stripe_head *sh,
			  struct stripe_head_state *s,
			  int disks)
{
	struct r5l_log *log = conf->log;
	int i;
	struct r5dev *dev;
	int to_cache = 0;
	void **pslot;
	sector_t tree_index;
	int ret;
	uintptr_t refcount;

	BUG_ON(!r5c_is_writeback(log));

	if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		/*
		 * There are two different scenarios here:
		 *  1. The stripe has some data cached, and it is sent to
		 *     write-out phase for reclaim
		 *  2. The stripe is clean, and this is the first write
		 *
		 * For 1, return -EAGAIN, so we continue with
		 * handle_stripe_dirtying().
		 *
		 * For 2, set STRIPE_R5C_CACHING and continue with caching
		 * write.
		 */

		/* case 1: anything in s->injournal or anything in s->written */
		if (s->injournal > 0 || s->written > 0)
			return -EAGAIN;
		/* case 2 */
		set_bit(STRIPE_R5C_CACHING, &sh->state);
	}

	/*
	 * When run in degraded mode, the array is set to write-through mode.
	 * This check helps drain pending writes safely in the transition to
	 * write-through mode.
	 */
	if (s->failed) {
		r5c_make_stripe_write_out(sh);
		return -EAGAIN;
	}

	for (i = disks; i--; ) {
		dev = &sh->dev[i];
		/* if non-overwrite, use writing-out phase */
		if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
		    !test_bit(R5_InJournal, &dev->flags)) {
			r5c_make_stripe_write_out(sh);
			return -EAGAIN;
		}
	}

	/* if the stripe is not counted in big_stripe_tree, add it now */
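	/*
	 * Note on the encoding used below: each radix tree slot stores a
	 * bare refcount rather than a pointer. The count lives in the bits
	 * above R5C_RADIX_COUNT_SHIFT, so a slot holding
	 * (void *)(3 << R5C_RADIX_COUNT_SHIFT) means three cached stripes
	 * fall within this big stripe.
	 */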
	if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
	    !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		tree_index = r5c_tree_index(conf, sh->sector);
		spin_lock(&log->tree_lock);
		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
					       tree_index);
		if (pslot) {
			refcount = (uintptr_t)radix_tree_deref_slot_protected(
				pslot, &log->tree_lock) >>
				R5C_RADIX_COUNT_SHIFT;
			radix_tree_replace_slot(
				&log->big_stripe_tree, pslot,
				(void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
		} else {
			/*
			 * this radix_tree_insert can fail safely, so no
			 * need to call radix_tree_preload()
			 */
			ret = radix_tree_insert(
				&log->big_stripe_tree, tree_index,
				(void *)(1 << R5C_RADIX_COUNT_SHIFT));
			if (ret) {
				spin_unlock(&log->tree_lock);
				r5c_make_stripe_write_out(sh);
				return -EAGAIN;
			}
		}
		spin_unlock(&log->tree_lock);

		/*
		 * set STRIPE_R5C_PARTIAL_STRIPE, this shows the stripe is
		 * counted in the radix tree
		 */
		set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
		atomic_inc(&conf->r5c_cached_partial_stripes);
	}

	for (i = disks; i--; ) {
		dev = &sh->dev[i];
		if (dev->towrite) {
			set_bit(R5_Wantwrite, &dev->flags);
			set_bit(R5_Wantdrain, &dev->flags);
			set_bit(R5_LOCKED, &dev->flags);
			to_cache++;
		}
	}

	if (to_cache) {
		set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
		/*
		 * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
		 * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
		 * r5c_handle_data_cached()
		 */
		set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	}

	return 0;
}

/*
 * free extra pages (orig_page) we allocated for prexor
 */
void r5c_release_extra_page(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int i;
	bool using_disk_info_extra_page;

	using_disk_info_extra_page =
		sh->dev[0].orig_page == conf->disks[0].extra_page;

	for (i = sh->disks; i--; )
		if (sh->dev[i].page != sh->dev[i].orig_page) {
			struct page *p = sh->dev[i].orig_page;

			sh->dev[i].orig_page = sh->dev[i].page;
			clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);

			if (!using_disk_info_extra_page)
				put_page(p);
		}

	if (using_disk_info_extra_page) {
		clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
		md_wakeup_thread(conf->mddev->thread);
	}
}

void r5c_use_extra_page(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int i;
	struct r5dev *dev;

	for (i = sh->disks; i--; ) {
		dev = &sh->dev[i];
		if (dev->orig_page != dev->page)
			put_page(dev->orig_page);
		dev->orig_page = conf->disks[i].extra_page;
	}
}

/*
 * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
 * stripe is committed to RAID disks.
 */
void r5c_finish_stripe_write_out(struct r5conf *conf,
				 struct stripe_head *sh,
				 struct stripe_head_state *s)
{
	struct r5l_log *log = conf->log;
	int i;
	int do_wakeup = 0;
	sector_t tree_index;
	void **pslot;
	uintptr_t refcount;

	if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
		return;

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;

	for (i = sh->disks; i--; ) {
		clear_bit(R5_InJournal, &sh->dev[i].flags);
		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
			do_wakeup = 1;
	}

	/*
	 * analyse_stripe() runs before r5c_finish_stripe_write_out(), and
	 * we updated R5_InJournal, so we also update s->injournal.
	 */
	s->injournal = 0;

	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
		if (atomic_dec_and_test(&conf->pending_full_writes))
			md_wakeup_thread(conf->mddev->thread);

	if (do_wakeup)
		wake_up(&conf->wait_for_overlap);

	spin_lock_irq(&log->stripe_in_journal_lock);
	list_del_init(&sh->r5c);
	spin_unlock_irq(&log->stripe_in_journal_lock);
	sh->log_start = MaxSector;

	atomic_dec(&log->stripe_in_journal_count);
	r5c_update_log_state(log);

	/* stop counting this stripe in big_stripe_tree */
	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
	    test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		tree_index = r5c_tree_index(conf, sh->sector);
		spin_lock(&log->tree_lock);
		pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
					       tree_index);
		BUG_ON(pslot == NULL);
		refcount = (uintptr_t)radix_tree_deref_slot_protected(
			pslot, &log->tree_lock) >>
			R5C_RADIX_COUNT_SHIFT;
		if (refcount == 1)
			radix_tree_delete(&log->big_stripe_tree, tree_index);
		else
			radix_tree_replace_slot(
				&log->big_stripe_tree, pslot,
				(void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
		spin_unlock(&log->tree_lock);
	}

	if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
		BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
		atomic_dec(&conf->r5c_flushing_partial_stripes);
		atomic_dec(&conf->r5c_cached_partial_stripes);
	}

	if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
		BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
		atomic_dec(&conf->r5c_flushing_full_stripes);
		atomic_dec(&conf->r5c_cached_full_stripes);
	}

	r5l_append_flush_payload(log, sh->sector);
}

Artur Paszkiewiczff875732017-03-09 09:59:58 +01002824int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
Song Liu1e6d6902016-11-17 15:24:39 -08002825{
Song Liua39f7af2016-11-17 15:24:40 -08002826 struct r5conf *conf = sh->raid_conf;
Song Liu1e6d6902016-11-17 15:24:39 -08002827 int pages = 0;
2828 int reserve;
2829 int i;
2830 int ret = 0;
2831
2832 BUG_ON(!log);
2833
2834 for (i = 0; i < sh->disks; i++) {
2835 void *addr;
2836
2837 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
2838 continue;
2839 addr = kmap_atomic(sh->dev[i].page);
2840 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
2841 addr, PAGE_SIZE);
2842 kunmap_atomic(addr);
2843 pages++;
2844 }
2845 WARN_ON(pages == 0);
2846
2847 /*
2848 * The stripe must enter state machine again to call endio, so
2849 * don't delay.
2850 */
2851 clear_bit(STRIPE_DELAYED, &sh->state);
2852 atomic_inc(&sh->count);
2853
2854 mutex_lock(&log->io_mutex);
2855 /* meta + data */
2856 reserve = (1 + pages) << (PAGE_SHIFT - 9);

	if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
	    sh->log_start == MaxSector)
		r5l_add_no_space_stripe(log, sh);
	else if (!r5l_has_free_space(log, reserve)) {
		if (sh->log_start == log->last_checkpoint)
			BUG();
		else
			r5l_add_no_space_stripe(log, sh);
	} else {
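		/*
		 * r5l_log_stripe() only fails here for lack of memory;
		 * the stripe is parked on no_mem_stripes and retried
		 * once log resources are freed, which is why the
		 * function still returns 0.
		 */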
		ret = r5l_log_stripe(log, sh, pages, 0);
		if (ret) {
			spin_lock_irq(&log->io_list_lock);
			list_add_tail(&sh->log_list, &log->no_mem_stripes);
			spin_unlock_irq(&log->io_list_lock);
		}
	}

	mutex_unlock(&log->io_mutex);
	return 0;
}

/* Check whether this big stripe is in the write-back cache. */
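/*
 * Lock-free lookup: callers must hold rcu_read_lock(), which the
 * WARN_ON_ONCE below asserts.
 */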
bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
{
	struct r5l_log *log = conf->log;
	sector_t tree_index;
	void *slot;

	if (!log)
		return false;

	WARN_ON_ONCE(!rcu_read_lock_held());
	tree_index = r5c_tree_index(conf, sect);
	slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
	return slot != NULL;
}

static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret = 0;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

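	/*
	 * Validate the meta block at the checkpoint: magic, version,
	 * crc32c checksum (seeded with uuid_checksum) and the recorded
	 * position must all match, otherwise the log is treated as
	 * empty and a fresh super block is created.
	 */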
	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = prandom_u32();
		cp = 0;
		r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
		/*
		 * Make sure the super block points to the correct
		 * address. The log might receive data very soon, and
		 * if the super block doesn't hold the correct log tail
		 * address, recovery can't find the log.
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	if (create_super) {
		log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
		log->seq = log->last_cp_seq + 1;
		log->next_checkpoint = cp;
	} else
		ret = r5l_recovery_log(log);

	r5c_update_log_state(log);
	return ret;
ioerr:
	__free_page(page);
	return ret;
}

void r5c_update_on_rdev_error(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;

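	/*
	 * Write-back caching on a degraded array introduces tricky
	 * recovery corner cases, so once a member disk fails, fall
	 * back to write-through asynchronously.
	 */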
	if (raid5_calc_degraded(conf) > 0 &&
	    conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
		schedule_work(&log->disable_writeback_work);
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct request_queue *q = bdev_get_queue(rdev->bdev);
	struct r5l_log *log;
	char b[BDEVNAME_SIZE];

	pr_debug("md/raid:%s: using device %s as journal\n",
		 mdname(conf->mddev), bdevname(rdev->bdev, b));

	if (PAGE_SIZE != 4096)
		return -EINVAL;

	/*
	 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
	 * raid_disks r5l_payload_data_parity structures.
	 *
	 * The write journal and cache do not work for very big arrays
	 * (raid_disks > 203).
	 */
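	/*
	 * Worked example for the 203 limit, assuming a 32-byte
	 * struct r5l_meta_block and 20 bytes per disk (16 bytes of
	 * struct r5l_payload_data_parity plus one __le32 page
	 * checksum): (4096 - 32) / 20 = 203.2, so at most 203 disks
	 * fit in a single 4K meta block.
	 */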
	if (sizeof(struct r5l_meta_block) +
	    ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
	     conf->raid_disks) > PAGE_SIZE) {
		pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
		       mdname(conf->mddev), conf->raid_disks);
		return -EINVAL;
	}

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;

	log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;

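	/*
	 * uuid_checksum seeds every crc32c computed for this log, so
	 * blocks written by a different array fail their checksum and
	 * are ignored during recovery.
	 */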
	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->finished_ios);
	bio_init(&log->flush_bio, NULL, 0);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
	if (!log->io_pool)
		goto io_pool;

	log->bs = bioset_create(R5L_POOL_SIZE, 0);
	if (!log->bs)
		goto io_bs;

	log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
	if (!log->meta_pool)
		goto out_mempool;

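	/*
	 * big_stripe_tree is updated under tree_lock, a spinlock, so
	 * radix tree nodes are allocated with GFP_NOWAIT; __GFP_NOWARN
	 * keeps failed opportunistic allocations quiet.
	 */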
	spin_lock_init(&log->tree_lock);
	INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);

	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;

	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_mem_stripes);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
	INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);

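	/* Arrays start in write-through mode; write-back is opt-in. */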
	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
	INIT_LIST_HEAD(&log->stripe_in_journal_list);
	spin_lock_init(&log->stripe_in_journal_lock);
	atomic_set(&log->stripe_in_journal_count, 0);

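	/*
	 * Publish the log with rcu_assign_pointer(); r5l_exit_log()
	 * pairs this with synchronize_rcu() before tearing it down.
	 */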
	rcu_assign_pointer(conf->log, log);

	if (r5l_load_log(log))
		goto error;

	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	return 0;

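/*
 * Unwind in reverse order of initialization; each label frees what was
 * successfully set up before the corresponding failure point.
 */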
error:
	rcu_assign_pointer(conf->log, NULL);
	md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
	mempool_destroy(log->meta_pool);
out_mempool:
	bioset_free(log->bs);
io_bs:
	mempool_destroy(log->io_pool);
io_pool:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}

void r5l_exit_log(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

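	/*
	 * Clear conf->log first, then wait for all RCU readers (e.g.
	 * r5c_big_stripe_cached()) to finish before freeing anything.
	 */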
	conf->log = NULL;
	synchronize_rcu();

	flush_work(&log->disable_writeback_work);
	md_unregister_thread(&log->reclaim_thread);
	mempool_destroy(log->meta_pool);
	bioset_free(log->bs);
	mempool_destroy(log->io_pool);
	kmem_cache_destroy(log->io_kc);
	kfree(log);
}