/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
#include "bitmap.h"

/*
 * metadata/data are stored on disk in 4k units (blocks), regardless of the
 * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)

/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write-through mode, reclaim runs once every log->max_free_space.
 * This prevents recovery from having to scan too much of the log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
#define R5C_FULL_STRIPE_FLUSH_BATCH 256
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available to not get too tight.
 */
#define R5L_POOL_SIZE 4

static char *r5c_journal_mode_str[] = {"write-through",
				       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *	if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *	if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */

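/*
 * Illustrative sketch (not part of the driver): the current phase of a
 * stripe is read straight from STRIPE_R5C_CACHING, roughly as below.
 * The helper name is hypothetical; the real transitions happen in
 * r5c_try_caching_write() and r5c_make_stripe_write_out().
 */
#if 0
static inline bool r5c_stripe_is_caching(struct stripe_head *sh)
{
	/* true: caching phase (log only); false: writing-out phase */
	return test_bit(STRIPE_R5C_CACHING, &sh->state);
}
#endif
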
struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, rounded to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim runs once free space
					 * reaches this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t *io_pool;
	struct bio_set *bs;
	mempool_t *meta_pool;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed.  if it's 0, reclaim space
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (i.e. reclaim
					 * doesn't wait for a specific io_unit
					 * to switch to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	/* for r5c_cache */
	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of seq at sh->log_start */
	struct list_head stripe_in_journal_list;

	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write back while in degraded mode */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in writeback mode, details below */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};

/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so such a chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. These counts are tracked in a radix tree
 * (big_stripe_tree). We use the radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up the
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This lookup is protected by
 * rcu_read_lock().
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags is set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 */

/*
 * The radix tree requires the lowest 2 bits of a data pointer to be 2b'00.
 * So it is necessary to left shift the counter by 2 bits before using it
 * as the data pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2

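/*
 * Illustrative sketch (not part of the driver): with the shift above, a
 * big_stripe reference count "cnt" is packed into and unpacked from a
 * radix tree slot roughly as below.  The helper names are hypothetical;
 * the real update code lives further down in this file.
 */
#if 0
static inline void *r5c_count_to_slot(unsigned long cnt)
{
	/* keep the low 2 bits zero so the radix tree treats it as a pointer */
	return (void *)(cnt << R5C_RADIX_COUNT_SHIFT);
}

static inline unsigned long r5c_slot_to_count(void *slot)
{
	return (unsigned long)slot >> R5C_RADIX_COUNT_SHIFT;
}
#endif
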
/*
 * calculate key for big_stripe_tree
 *
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
				      sector_t sect)
{
	sector_t offset;

	offset = sector_div(sect, conf->chunk_sectors);
	return sect;
}

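/*
 * Illustrative sketch (not part of the driver): how chunk_aligned_read()
 * consults the tree, per the description above.  The helper name here is
 * hypothetical; the real r5c_big_stripe_cached() is defined later in this
 * file.
 */
#if 0
static bool r5c_chunk_is_cached(struct r5conf *conf, struct bio *align_bi)
{
	sector_t index = r5c_tree_index(conf, align_bi->bi_iter.bi_sector);
	void *slot;

	/* readers use RCU; updates are serialized by log->tree_lock */
	rcu_read_lock();
	slot = radix_tree_lookup(&conf->log->big_stripe_tree, index);
	rcu_read_unlock();

	/* non-NULL: some stripe of this chunk is still in the write back cache */
	return slot != NULL;
}
#endif
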
/*
 * An IO range starts from a meta data block and ends at the next meta data
 * block. The io unit's meta data block tracks the data/parity that follows
 * it. The io unit is written to the log disk with a normal write; since we
 * always flush the log disk first and only then start moving data to the
 * raid disks, there is no requirement to write the io unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
	struct bio *split_bio;

	unsigned int has_flush:1;	/* include flush request */
	unsigned int has_fua:1;		/* include fua request */
	unsigned int has_null_flush:1;	/* include empty flush request */
	/*
	 * io isn't sent yet; a flush/fua request can only be submitted once
	 * it is the first IO in the running_ios list
	 */
	unsigned int io_deferred:1;

	struct bio_list flush_barriers;   /* size == 0 flush bios */
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio starts writing to the log,
				 * not accepting new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to the log */
	IO_UNIT_STRIPE_END = 3,	/* stripe data finished writing to raid */
};

bool r5c_is_writeback(struct r5l_log *log)
{
	return (log != NULL &&
		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

static void
r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev,
			      struct bio_list *return_bi)
{
	struct bio *wbi, *wbi2;

	wbi = dev->written;
	dev->written = NULL;
	while (wbi && wbi->bi_iter.bi_sector <
	       dev->sector + STRIPE_SECTORS) {
		wbi2 = r5_next_bio(wbi, dev->sector);
		if (!raid5_dec_bi_active_stripes(wbi)) {
			md_write_end(conf->mddev);
			bio_list_add(return_bi, wbi);
		}
		wbi = wbi2;
	}
}

void r5c_handle_cached_data_endio(struct r5conf *conf,
	  struct stripe_head *sh, int disks, struct bio_list *return_bi)
{
	int i;

	for (i = sh->disks; i--; ) {
		if (sh->dev[i].written) {
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
			r5c_return_dev_pending_writes(conf, &sh->dev[i],
						      return_bi);
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS,
					!test_bit(STRIPE_DEGRADED, &sh->state),
					0);
		}
	}
}

/* Check whether we should flush some stripes to free up stripe cache */
void r5c_check_stripe_cache_usage(struct r5conf *conf)
{
	int total_cached;

	if (!r5c_is_writeback(conf->log))
		return;

	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes);

	/*
	 * The following condition is true for either of the following:
	 *   - stripe cache pressure high:
	 *          total_cached > 3/4 min_nr_stripes ||
	 *          empty_inactive_list_nr > 0
	 *   - stripe cache pressure moderate:
	 *          total_cached > 1/2 min_nr_stripes
	 */
	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		r5l_wake_reclaim(conf->log, 0);
}

/*
 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
 * stripes in the cache
 */
void r5c_check_cached_full_stripe(struct r5conf *conf)
{
	if (!r5c_is_writeback(conf->log))
		return;

	/*
	 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
	 * or a full stripe (chunk size / 4k stripes).
	 */
	if (atomic_read(&conf->r5c_cached_full_stripes) >=
	    min(R5C_FULL_STRIPE_FLUSH_BATCH,
		conf->chunk_sectors >> STRIPE_SHIFT))
		r5l_wake_reclaim(conf->log, 0);
}

/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes occupying log space near
 * last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flush higher priorities:
 *    1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *       stripes ALREADY in journal can be flushed w/o pending writes;
 *    2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in journal
 *       can be delayed (r5l_add_no_space_stripe).
 *
 * In cache flush, the stripe goes through 1 and then 2. For a stripe that
 * already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For stripes that have not passed 1, flushing them
 * requires (conf->raid_disks + 1) pages of journal space. There are at
 * most (conf->group_cnt + 1) stripes that passed 1. So total journal space
 * required to flush all cached stripes (in pages) is:
 *
 *    (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *    (group_cnt + 1) * (raid_disks + 1)
 * or
 *    (stripe_in_journal_count) * (max_degraded + 1) +
 *    (group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	if (!r5c_is_writeback(log))
		return 0;

	return BLOCK_SECTORS *
		((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
		 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}

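/*
 * Worked example (illustrative numbers only): for a RAID-6 array with
 * raid_disks = 8 (max_degraded = 2), group_cnt = 3 and 100 stripes in the
 * journal, the reservation above is
 *
 *	100 * (2 + 1) + (3 + 1) * (8 - 2) = 324 pages,
 *
 * i.e. 324 * BLOCK_SECTORS = 2592 sectors of log space.
 */
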
/*
 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
 *
 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
 * device is less than 2x of reclaim_required_space.
 */
static inline void r5c_update_log_state(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t free_space;
	sector_t reclaim_space;
	bool wake_reclaim = false;

	if (!r5c_is_writeback(log))
		return;

	free_space = r5l_ring_distance(log, log->log_start,
				       log->last_checkpoint);
	reclaim_space = r5c_log_required_to_flush_cache(conf);
	if (free_space < 2 * reclaim_space)
		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	else {
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
			wake_reclaim = true;
		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	}
	if (free_space < 3 * reclaim_space)
		set_bit(R5C_LOG_TIGHT, &conf->cache_state);
	else
		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);

	if (wake_reclaim)
		r5l_wake_reclaim(log, 0);
}

/*
 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
 * This function should only be called in write-back mode.
 */
void r5c_make_stripe_write_out(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5l_log *log = conf->log;

	BUG_ON(!r5c_is_writeback(log));

	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(STRIPE_R5C_CACHING, &sh->state);

	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_inc(&conf->preread_active_stripes);
}

static void r5c_handle_data_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			set_bit(R5_InJournal, &sh->dev[i].flags);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
		}
	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
}

/*
 * this journal write must contain full parity,
 * it may also contain some data pages
 */
static void r5c_handle_parity_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_bit(R5_InJournal, &sh->dev[i].flags))
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
}

/*
 * Setting proper flags after writing (or flushing) data and/or parity to the
 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
 */
static void r5c_finish_cache_stripe(struct stripe_head *sh)
{
	struct r5l_log *log = sh->raid_conf->log;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
		/*
		 * Set R5_InJournal for parity dev[pd_idx]. This means
		 * all data AND parity in the journal. For RAID 6, it is
		 * NOT necessary to set the flag for dev[qd_idx], as the
		 * two parities are written out together.
		 */
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		r5c_handle_data_cached(sh);
	} else {
		r5c_handle_parity_cached(sh);
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	}
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		r5c_finish_cache_stripe(sh);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_io_unit *io_deferred;
	struct r5l_log *log = io->log;
	unsigned long flags;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
	if (log->need_cache_flush)
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	if (!list_empty(&log->running_ios)) {
		/*
		 * FLUSH/FUA io_unit is deferred because of ordering, now we
		 * can dispatch it
		 */
		io_deferred = list_first_entry(&log->running_ios,
					       struct r5l_io_unit, log_sibling);
		if (io_deferred->io_deferred)
			schedule_work(&log->deferred_io_work);
	}

	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);

	if (io->has_null_flush) {
		struct bio *bi;

		WARN_ON(bio_list_empty(&io->flush_barriers));
		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
			bio_endio(bi);
			atomic_dec(&io->pending_stripe);
		}
		if (atomic_read(&io->pending_stripe) == 0)
			__r5l_stripe_write_finished(io);
	}
}

static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
{
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (io->has_flush)
		io->current_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->current_bio->bi_opf |= REQ_FUA;
	submit_bio(io->current_bio);

	if (!io->split_bio)
		return;

	if (io->has_flush)
		io->split_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->split_bio->bi_opf |= REQ_FUA;
	submit_bio(io->split_bio);
}

/* deferred io_unit will be dispatched here */
static void r5l_submit_io_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   deferred_io_work);
	struct r5l_io_unit *io = NULL;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	if (!list_empty(&log->running_ios)) {
		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
				      log_sibling);
		if (!io->io_deferred)
			io = NULL;
		else
			io->io_deferred = 0;
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (io)
		r5l_do_submit_io(log, io);
}

static void r5c_disable_writeback_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   disable_writeback_work);
	struct mddev *mddev = log->rdev->mddev;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;
	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
		mdname(mddev));
	mddev_suspend(mddev);
	log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
	mddev_resume(mddev);
}

static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct bio *bio;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;
	bool do_submit = true;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);
	bio = io->current_bio;

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	if (io->has_flush || io->has_fua) {
		if (io != list_first_entry(&log->running_ios,
					   struct r5l_io_unit, log_sibling)) {
			io->io_deferred = 1;
			do_submit = false;
		}
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (do_submit)
		r5l_do_submit_io(log, io);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);

	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio->bi_bdev = log->rdev->bdev;
	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	r5c_update_log_state(log);
	/*
	 * If we filled up the log device start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = mempool_alloc(log->io_pool, GFP_ATOMIC);
	if (!io)
		return NULL;
	memset(io, 0, sizeof(*io));

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	bio_list_init(&io->flush_barriers);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io) {
		log->current_io = r5l_new_meta(log);
		if (!log->current_io)
			return -ENOMEM;
	}

	return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		BUG_ON(io->split_bio);
		io->split_bio = io->current_bio;
		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, io->split_bio);
		io->need_split_bio = false;
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}

static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			   int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	int ret;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	ret = r5l_get_meta(log, meta_size);
	if (ret)
		return ret;

	io = log->current_io;

	if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state))
		io->has_flush = 1;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
		    log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
			io->has_fua = 1;
			/*
			 * we need to flush journal to make sure recovery can
			 * reach the data with fua flag
			 */
			io->has_flush = 1;
		}
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (parity_pages == 2) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else if (parity_pages == 1) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	} else /* Just writing data, not parity, in caching phase */
		BUG_ON(parity_pages != 0);

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return 0;

	if (sh->log_start == MaxSector) {
		BUG_ON(!list_empty(&sh->r5c));
		sh->log_start = io->log_start;
		spin_lock_irq(&log->stripe_in_journal_lock);
		list_add_tail(&sh->r5c,
			      &log->stripe_in_journal_list);
		spin_unlock_irq(&log->stripe_in_journal_lock);
		atomic_inc(&log->stripe_in_journal_count);
	}
	return 0;
}

/* add stripe to no_space_stripes, and then wake up reclaim */
static inline void r5l_add_no_space_stripe(struct r5l_log *log,
					   struct stripe_head *sh)
{
	spin_lock(&log->no_space_stripes_lock);
	list_add_tail(&sh->log_list, &log->no_space_stripes);
	spin_unlock(&log->no_space_stripes_lock);
}

/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	int write_disks = 0;
	int data_pages, parity_pages;
	int reserve;
	int i;
	int ret = 0;
	bool wake_reclaim = false;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
		    test_bit(R5_InJournal, &sh->dev[i].flags))
			continue;

		write_disks++;
		/* checksum is already calculated in last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		if (!r5l_has_free_space(log, reserve)) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	} else {  /* R5C_JOURNAL_MODE_WRITE_BACK */
		/*
		 * log space critical, do not process stripes that are
		 * not in cache yet (sh->log_start == MaxSector).
		 */
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
		    sh->log_start == MaxSector) {
			r5l_add_no_space_stripe(log, sh);
			wake_reclaim = true;
			reserve = 0;
		} else if (!r5l_has_free_space(log, reserve)) {
			if (sh->log_start == log->last_checkpoint)
				BUG();
			else
				r5l_add_no_space_stripe(log, sh);
		} else {
			ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
			if (ret) {
				spin_lock_irq(&log->io_list_lock);
				list_add_tail(&sh->log_list,
					      &log->no_mem_stripes);
				spin_unlock_irq(&log->io_list_lock);
			}
		}
	}

	mutex_unlock(&log->io_mutex);
	if (wake_reclaim)
		r5l_wake_reclaim(log, reserve);
	return 0;
}

void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (!log)
		return -ENODEV;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		/*
		 * in write through (journal only)
		 * we flush log disk cache first, then write stripe data to
		 * raid disks. So if bio is finished, the log disk cache has
		 * already been flushed. The recovery guarantees we can recover
		 * the bio from the log disk, so we don't need to flush again.
		 */
		if (bio->bi_iter.bi_size == 0) {
			bio_endio(bio);
			return 0;
		}
		bio->bi_opf &= ~REQ_PREFLUSH;
	} else {
		/* write back (with cache) */
		if (bio->bi_iter.bi_size == 0) {
			mutex_lock(&log->io_mutex);
			r5l_get_meta(log, 0);
			bio_list_add(&log->current_io->flush_barriers, bio);
			log->current_io->has_flush = 1;
			log->current_io->has_null_flush = 1;
			atomic_inc(&log->current_io->pending_stripe);
			r5l_submit_current_io(log);
			mutex_unlock(&log->io_mutex);
			return 0;
		}
	}
	return -EAGAIN;
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}

/*
 * calculate new last_checkpoint
 * for write through mode, returns log->next_checkpoint
 * for write back, returns log_start of first sh in stripe_in_journal_list
 */
static sector_t r5c_calculate_new_cp(struct r5conf *conf)
{
	struct stripe_head *sh;
	struct r5l_log *log = conf->log;
	sector_t new_cp;
	unsigned long flags;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return log->next_checkpoint;

	spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
	if (list_empty(&conf->log->stripe_in_journal_list)) {
		/* all stripes flushed */
		spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
		return log->next_checkpoint;
	}
	sh = list_first_entry(&conf->log->stripe_in_journal_list,
			      struct stripe_head, r5c);
	new_cp = sh->log_start;
	spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
	return new_cp;
}

static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;

	return r5l_ring_distance(log, log->last_checkpoint,
				 r5c_calculate_new_cp(conf));
}

static void r5l_run_no_mem_stripe(struct r5l_log *log)
{
	struct stripe_head *sh;

	assert_spin_locked(&log->io_list_lock);

	if (!list_empty(&log->no_mem_stripes)) {
		sh = list_first_entry(&log->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static bool r5l_complete_finished_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;
	bool found = false;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_STRIPE_END)
			break;

		log->next_checkpoint = io->log_start;

		list_del(&io->log_sibling);
		mempool_free(io, log->io_pool);
		r5l_run_no_mem_stripe(log);

		found = true;
	}

	return found;
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	struct r5conf *conf = log->rdev->mddev->private;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

	if (!r5l_complete_finished_ios(log)) {
		spin_unlock_irqrestore(&log->io_list_lock, flags);
		return;
	}

	if (r5l_reclaimable_space(log) > log->max_free_space ||
	    test_bit(R5C_LOG_TIGHT, &conf->cache_state))
		r5l_wake_reclaim(log, 0);

	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}

static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
		flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling)
		r5l_io_run_stripes(io);
	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Starting dispatch IO to raid.
 * A log consists of io_units, each headed by a meta block. There is one
 * situation we want to avoid: a broken meta block in the middle of the log
 * keeps recovery from finding any meta block at the head of the log. If an
 * operation requires the meta block at the head to be persistent in the log,
 * we must make sure the meta blocks before it are persistent in the log too.
 * One case is:
 *
 * stripe data/parity is in the log, and we start writing the stripe to the
 * raid disks. The stripe data/parity must be persistent in the log before we
 * do the write to the raid disks.
 *
 * The solution is that we strictly maintain io_unit list order. In this case,
 * we only write stripes of an io_unit to the raid disks once that io_unit is
 * the first one whose data/parity is in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;

	if (!log || !log->need_cache_flush)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_reset(&log->flush_bio);
	log->flush_bio.bi_bdev = log->rdev->bdev;
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	submit_bio(&log->flush_bio);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_write_super_and_discard_space(struct r5l_log *log,
	sector_t end)
{
	struct block_device *bdev = log->rdev->bdev;
	struct mddev *mddev;

	r5l_write_super(log, end);

	if (!blk_queue_discard(bdev_get_queue(bdev)))
		return;

	mddev = log->rdev->mddev;
	/*
	 * Discard could zero data, so before discard we must make sure
	 * superblock is updated to new log tail. Updating superblock (either
	 * directly call md_update_sb() or depend on md thread) must hold
	 * reconfig mutex. On the other hand, raid5_quiesce is called with
	 * reconfig_mutex held. The first step of raid5_quiesce() is waiting
	 * for all IO to finish, hence waiting for the reclaim thread, while
	 * the reclaim thread is calling this function and waiting for the
	 * reconfig mutex. So there is a deadlock. We work around this issue
	 * with a trylock.
	 * FIXME: we could miss discard if we can't take reconfig mutex
	 */
	set_mask_bits(&mddev->sb_flags, 0,
		BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
	if (!mddev_trylock(mddev))
		return;
	md_update_sb(mddev, 1);
	mddev_unlock(mddev);

	/* discard IO error really doesn't matter, ignore it */
	if (log->last_checkpoint < end) {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				end - log->last_checkpoint, GFP_NOIO, 0);
	} else {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				log->device_size - log->last_checkpoint,
				GFP_NOIO, 0);
		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
				GFP_NOIO, 0);
	}
}

/*
 * r5c_flush_stripe moves stripe from cached list to handle_list. When called,
 * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes.
 *
 * must hold conf->device_lock
 */
static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	BUG_ON(list_empty(&sh->lru));
	BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));

	/*
	 * The stripe is not ON_RELEASE_LIST, so it is safe to call
	 * raid5_release_stripe() while holding conf->device_lock
	 */
	BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
	assert_spin_locked(&conf->device_lock);

	list_del_init(&sh->lru);
	atomic_inc(&sh->count);

	set_bit(STRIPE_HANDLE, &sh->state);
	atomic_inc(&conf->active_stripes);
	r5c_make_stripe_write_out(sh);

	if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state))
		atomic_inc(&conf->r5c_flushing_partial_stripes);
	else
		atomic_inc(&conf->r5c_flushing_full_stripes);
	raid5_release_stripe(sh);
}

/*
 * if num == 0, flush all full stripes
 * if num > 0, flush all full stripes. If fewer than num full stripes are
 *             flushed, flush some partial stripes until num stripes in total
 *             are flushed or there are no more cached stripes.
 */
void r5c_flush_cache(struct r5conf *conf, int num)
{
	int count;
	struct stripe_head *sh, *next;

	assert_spin_locked(&conf->device_lock);
	if (!conf->log)
		return;

	count = 0;
	list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) {
		r5c_flush_stripe(conf, sh);
		count++;
	}

	if (count >= num)
		return;
	list_for_each_entry_safe(sh, next,
				 &conf->r5c_partial_stripe_list, lru) {
		r5c_flush_stripe(conf, sh);
		if (++count >= num)
			break;
	}
}

1357
1358static void r5c_do_reclaim(struct r5conf *conf)
1359{
1360 struct r5l_log *log = conf->log;
1361 struct stripe_head *sh;
1362 int count = 0;
1363 unsigned long flags;
1364 int total_cached;
1365 int stripes_to_flush;
Shaohua Lie33fbb92017-02-10 16:18:09 -08001366 int flushing_partial, flushing_full;
Song Liua39f7af2016-11-17 15:24:40 -08001367
1368 if (!r5c_is_writeback(log))
1369 return;
1370
Shaohua Lie33fbb92017-02-10 16:18:09 -08001371 flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes);
1372 flushing_full = atomic_read(&conf->r5c_flushing_full_stripes);
Song Liua39f7af2016-11-17 15:24:40 -08001373 total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
Shaohua Lie33fbb92017-02-10 16:18:09 -08001374 atomic_read(&conf->r5c_cached_full_stripes) -
1375 flushing_full - flushing_partial;
Song Liua39f7af2016-11-17 15:24:40 -08001376
1377 if (total_cached > conf->min_nr_stripes * 3 / 4 ||
1378 atomic_read(&conf->empty_inactive_list_nr) > 0)
1379 /*
1380 * if stripe cache pressure high, flush all full stripes and
1381 * some partial stripes
1382 */
1383 stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP;
1384 else if (total_cached > conf->min_nr_stripes * 1 / 2 ||
Shaohua Lie33fbb92017-02-10 16:18:09 -08001385 atomic_read(&conf->r5c_cached_full_stripes) - flushing_full >
Song Liua39f7af2016-11-17 15:24:40 -08001386 R5C_FULL_STRIPE_FLUSH_BATCH)
1387 /*
 1388 * if stripe cache pressure is moderate, or if there are many full
 1389 * stripes, flush all full stripes
1390 */
1391 stripes_to_flush = 0;
1392 else
1393 /* no need to flush */
1394 stripes_to_flush = -1;
1395
1396 if (stripes_to_flush >= 0) {
1397 spin_lock_irqsave(&conf->device_lock, flags);
1398 r5c_flush_cache(conf, stripes_to_flush);
1399 spin_unlock_irqrestore(&conf->device_lock, flags);
1400 }
1401
1402 /* if log space is tight, flush stripes on stripe_in_journal_list */
1403 if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) {
1404 spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1405 spin_lock(&conf->device_lock);
1406 list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
1407 /*
1408 * stripes on stripe_in_journal_list could be in any
1409 * state of the stripe_cache state machine. In this
 1410 * case, we only want to flush stripes on
1411 * r5c_cached_full/partial_stripes. The following
1412 * condition makes sure the stripe is on one of the
1413 * two lists.
1414 */
1415 if (!list_empty(&sh->lru) &&
1416 !test_bit(STRIPE_HANDLE, &sh->state) &&
1417 atomic_read(&sh->count) == 0) {
1418 r5c_flush_stripe(conf, sh);
Shaohua Lie8fd52e2017-02-10 16:18:08 -08001419 if (count++ >= R5C_RECLAIM_STRIPE_GROUP)
1420 break;
Song Liua39f7af2016-11-17 15:24:40 -08001421 }
Song Liua39f7af2016-11-17 15:24:40 -08001422 }
1423 spin_unlock(&conf->device_lock);
1424 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1425 }
Song Liuf687a332016-11-30 16:57:54 -08001426
1427 if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
1428 r5l_run_no_space_stripes(log);
1429
Song Liua39f7af2016-11-17 15:24:40 -08001430 md_wakeup_thread(conf->mddev->thread);
1431}
Shaohua Li4b482042015-10-08 21:54:06 -07001432
Shaohua Li0576b1c2015-08-13 14:32:00 -07001433static void r5l_do_reclaim(struct r5l_log *log)
1434{
Song Liua39f7af2016-11-17 15:24:40 -08001435 struct r5conf *conf = log->rdev->mddev->private;
Shaohua Li0576b1c2015-08-13 14:32:00 -07001436 sector_t reclaim_target = xchg(&log->reclaim_target, 0);
Christoph Hellwig17036462015-10-05 09:31:06 +02001437 sector_t reclaimable;
1438 sector_t next_checkpoint;
Song Liua39f7af2016-11-17 15:24:40 -08001439 bool write_super;
Shaohua Li0576b1c2015-08-13 14:32:00 -07001440
1441 spin_lock_irq(&log->io_list_lock);
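	/*
	 * Decide up front whether the superblock must be updated: enough
	 * space is reclaimable, an explicit reclaim target was requested,
	 * or stripes are waiting for log space.
	 */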
Song Liua39f7af2016-11-17 15:24:40 -08001442 write_super = r5l_reclaimable_space(log) > log->max_free_space ||
1443 reclaim_target != 0 || !list_empty(&log->no_space_stripes);
Shaohua Li0576b1c2015-08-13 14:32:00 -07001444 /*
 1445 * move proper io_units to the reclaim list. We should not change the order:
 1446 * reclaimable and unreclaimable io_units can be mixed in the list, and we
 1447 * must not reuse the space of an unreclaimable io_unit
1448 */
1449 while (1) {
Christoph Hellwig17036462015-10-05 09:31:06 +02001450 reclaimable = r5l_reclaimable_space(log);
1451 if (reclaimable >= reclaim_target ||
Shaohua Li0576b1c2015-08-13 14:32:00 -07001452 (list_empty(&log->running_ios) &&
1453 list_empty(&log->io_end_ios) &&
Shaohua Lia8c34f92015-09-02 13:49:46 -07001454 list_empty(&log->flushing_ios) &&
Christoph Hellwig04732f72015-10-05 09:31:07 +02001455 list_empty(&log->finished_ios)))
Shaohua Li0576b1c2015-08-13 14:32:00 -07001456 break;
1457
Christoph Hellwig17036462015-10-05 09:31:06 +02001458 md_wakeup_thread(log->rdev->mddev->thread);
1459 wait_event_lock_irq(log->iounit_wait,
1460 r5l_reclaimable_space(log) > reclaimable,
1461 log->io_list_lock);
Shaohua Li0576b1c2015-08-13 14:32:00 -07001462 }
Christoph Hellwig17036462015-10-05 09:31:06 +02001463
Song Liua39f7af2016-11-17 15:24:40 -08001464 next_checkpoint = r5c_calculate_new_cp(conf);
Shaohua Li0576b1c2015-08-13 14:32:00 -07001465 spin_unlock_irq(&log->io_list_lock);
1466
Song Liua39f7af2016-11-17 15:24:40 -08001467 if (reclaimable == 0 || !write_super)
Shaohua Li0576b1c2015-08-13 14:32:00 -07001468 return;
1469
Shaohua Li0576b1c2015-08-13 14:32:00 -07001470 /*
 1471 * write_super will flush the cache of each raid disk. We must write super
1472 * here, because the log area might be reused soon and we don't want to
1473 * confuse recovery
1474 */
Shaohua Li4b482042015-10-08 21:54:06 -07001475 r5l_write_super_and_discard_space(log, next_checkpoint);
Shaohua Li0576b1c2015-08-13 14:32:00 -07001476
1477 mutex_lock(&log->io_mutex);
Christoph Hellwig17036462015-10-05 09:31:06 +02001478 log->last_checkpoint = next_checkpoint;
Song Liua39f7af2016-11-17 15:24:40 -08001479 r5c_update_log_state(log);
Shaohua Li0576b1c2015-08-13 14:32:00 -07001480 mutex_unlock(&log->io_mutex);
Shaohua Li0576b1c2015-08-13 14:32:00 -07001481
Christoph Hellwig17036462015-10-05 09:31:06 +02001482 r5l_run_no_space_stripes(log);
Shaohua Li0576b1c2015-08-13 14:32:00 -07001483}
1484
1485static void r5l_reclaim_thread(struct md_thread *thread)
1486{
1487 struct mddev *mddev = thread->mddev;
1488 struct r5conf *conf = mddev->private;
1489 struct r5l_log *log = conf->log;
1490
1491 if (!log)
1492 return;
Song Liua39f7af2016-11-17 15:24:40 -08001493 r5c_do_reclaim(conf);
Shaohua Li0576b1c2015-08-13 14:32:00 -07001494 r5l_do_reclaim(log);
1495}
1496
Song Liua39f7af2016-11-17 15:24:40 -08001497void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
Shaohua Lif6bed0e2015-08-13 14:31:59 -07001498{
Shaohua Li0576b1c2015-08-13 14:32:00 -07001499 unsigned long target;
1500 unsigned long new = (unsigned long)space; /* overflow in theory */
1501
Song Liua39f7af2016-11-17 15:24:40 -08001502 if (!log)
1503 return;
Shaohua Li0576b1c2015-08-13 14:32:00 -07001504 do {
1505 target = log->reclaim_target;
1506 if (new < target)
1507 return;
1508 } while (cmpxchg(&log->reclaim_target, target, new) != target);
1509 md_wakeup_thread(log->reclaim_thread);
Shaohua Lif6bed0e2015-08-13 14:31:59 -07001510}
1511
Shaohua Lie6c033f2015-10-04 09:20:12 -07001512void r5l_quiesce(struct r5l_log *log, int state)
1513{
Shaohua Li4b482042015-10-08 21:54:06 -07001514 struct mddev *mddev;
Shaohua Lie6c033f2015-10-04 09:20:12 -07001515 if (!log || state == 2)
1516 return;
Shaohua Lice1ccd02016-11-21 10:29:18 -08001517 if (state == 0)
1518 kthread_unpark(log->reclaim_thread->tsk);
1519 else if (state == 1) {
Shaohua Li4b482042015-10-08 21:54:06 -07001520 /* make sure r5l_write_super_and_discard_space exits */
1521 mddev = log->rdev->mddev;
1522 wake_up(&mddev->sb_wait);
Shaohua Lice1ccd02016-11-21 10:29:18 -08001523 kthread_park(log->reclaim_thread->tsk);
Song Liua39f7af2016-11-17 15:24:40 -08001524 r5l_wake_reclaim(log, MaxSector);
Shaohua Lie6c033f2015-10-04 09:20:12 -07001525 r5l_do_reclaim(log);
1526 }
1527}
1528
Shaohua Li6e74a9c2015-10-08 21:54:08 -07001529bool r5l_log_disk_error(struct r5conf *conf)
1530{
Shaohua Lif6b6ec52015-12-21 10:51:02 +11001531 struct r5l_log *log;
1532 bool ret;
Shaohua Li7dde2ad2015-10-08 21:54:10 -07001533 /* don't allow write if journal disk is missing */
Shaohua Lif6b6ec52015-12-21 10:51:02 +11001534 rcu_read_lock();
1535 log = rcu_dereference(conf->log);
1536
1537 if (!log)
1538 ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
1539 else
1540 ret = test_bit(Faulty, &log->rdev->flags);
1541 rcu_read_unlock();
1542 return ret;
Shaohua Li6e74a9c2015-10-08 21:54:08 -07001543}
1544
Shaohua Li355810d2015-08-13 14:32:01 -07001545struct r5l_recovery_ctx {
1546 struct page *meta_page; /* current meta */
1547 sector_t meta_total_blocks; /* total size of current meta and data */
1548 sector_t pos; /* recovery position */
1549 u64 seq; /* recovery position seq */
Song Liub4c625c2016-11-17 15:24:43 -08001550 int data_parity_stripes; /* number of data_parity stripes */
1551 int data_only_stripes; /* number of data_only stripes */
1552 struct list_head cached_list;
Shaohua Li355810d2015-08-13 14:32:01 -07001553};
1554
Song Liu9ed988f52016-11-17 15:24:42 -08001555static int r5l_recovery_read_meta_block(struct r5l_log *log,
1556 struct r5l_recovery_ctx *ctx)
Shaohua Li355810d2015-08-13 14:32:01 -07001557{
1558 struct page *page = ctx->meta_page;
1559 struct r5l_meta_block *mb;
1560 u32 crc, stored_crc;
1561
Mike Christie796a5cf2016-06-05 14:32:07 -05001562 if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_READ, 0,
1563 false))
Shaohua Li355810d2015-08-13 14:32:01 -07001564 return -EIO;
1565
1566 mb = page_address(page);
1567 stored_crc = le32_to_cpu(mb->checksum);
1568 mb->checksum = 0;
1569
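	/* reject meta blocks whose header does not match the expected position/seq */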
1570 if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
1571 le64_to_cpu(mb->seq) != ctx->seq ||
1572 mb->version != R5LOG_VERSION ||
1573 le64_to_cpu(mb->position) != ctx->pos)
1574 return -EINVAL;
1575
Shaohua Li5cb2fbd2015-10-28 08:41:25 -07001576 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
Shaohua Li355810d2015-08-13 14:32:01 -07001577 if (stored_crc != crc)
1578 return -EINVAL;
1579
1580 if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
1581 return -EINVAL;
1582
1583 ctx->meta_total_blocks = BLOCK_SECTORS;
1584
1585 return 0;
1586}
1587
Song Liu9ed988f52016-11-17 15:24:42 -08001588static void
1589r5l_recovery_create_empty_meta_block(struct r5l_log *log,
1590 struct page *page,
1591 sector_t pos, u64 seq)
Shaohua Li355810d2015-08-13 14:32:01 -07001592{
Shaohua Li355810d2015-08-13 14:32:01 -07001593 struct r5l_meta_block *mb;
Shaohua Li355810d2015-08-13 14:32:01 -07001594
Shaohua Li355810d2015-08-13 14:32:01 -07001595 mb = page_address(page);
Song Liu9ed988f52016-11-17 15:24:42 -08001596 clear_page(mb);
Shaohua Li355810d2015-08-13 14:32:01 -07001597 mb->magic = cpu_to_le32(R5LOG_MAGIC);
1598 mb->version = R5LOG_VERSION;
1599 mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
1600 mb->seq = cpu_to_le64(seq);
1601 mb->position = cpu_to_le64(pos);
Shaohua Li355810d2015-08-13 14:32:01 -07001602}
1603
1604static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1605 u64 seq)
1606{
1607 struct page *page;
1608 struct r5l_meta_block *mb;
Shaohua Li355810d2015-08-13 14:32:01 -07001609
Song Liu9ed988f52016-11-17 15:24:42 -08001610 page = alloc_page(GFP_KERNEL);
Shaohua Li355810d2015-08-13 14:32:01 -07001611 if (!page)
1612 return -ENOMEM;
Song Liu9ed988f52016-11-17 15:24:42 -08001613 r5l_recovery_create_empty_meta_block(log, page, pos, seq);
Shaohua Li355810d2015-08-13 14:32:01 -07001614 mb = page_address(page);
Song Liu5c88f402016-12-07 09:42:05 -08001615 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
1616 mb, PAGE_SIZE));
Mike Christie796a5cf2016-06-05 14:32:07 -05001617 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
Christoph Hellwig70fd7612016-11-01 07:40:10 -06001618 REQ_FUA, false)) {
Shaohua Li355810d2015-08-13 14:32:01 -07001619 __free_page(page);
1620 return -EIO;
1621 }
1622 __free_page(page);
1623 return 0;
1624}
1625
Song Liub4c625c2016-11-17 15:24:43 -08001626/*
 1627 * r5l_recovery_load_data and r5l_recovery_load_parity use flag R5_Wantwrite
1628 * to mark valid (potentially not flushed) data in the journal.
1629 *
1630 * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb,
1631 * so there should not be any mismatch here.
1632 */
1633static void r5l_recovery_load_data(struct r5l_log *log,
1634 struct stripe_head *sh,
1635 struct r5l_recovery_ctx *ctx,
1636 struct r5l_payload_data_parity *payload,
1637 sector_t log_offset)
1638{
1639 struct mddev *mddev = log->rdev->mddev;
1640 struct r5conf *conf = mddev->private;
1641 int dd_idx;
1642
1643 raid5_compute_sector(conf,
1644 le64_to_cpu(payload->location), 0,
1645 &dd_idx, sh);
1646 sync_page_io(log->rdev, log_offset, PAGE_SIZE,
1647 sh->dev[dd_idx].page, REQ_OP_READ, 0, false);
1648 sh->dev[dd_idx].log_checksum =
1649 le32_to_cpu(payload->checksum[0]);
1650 ctx->meta_total_blocks += BLOCK_SECTORS;
1651
1652 set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
1653 set_bit(STRIPE_R5C_CACHING, &sh->state);
1654}
1655
1656static void r5l_recovery_load_parity(struct r5l_log *log,
1657 struct stripe_head *sh,
1658 struct r5l_recovery_ctx *ctx,
1659 struct r5l_payload_data_parity *payload,
1660 sector_t log_offset)
1661{
1662 struct mddev *mddev = log->rdev->mddev;
1663 struct r5conf *conf = mddev->private;
1664
1665 ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
1666 sync_page_io(log->rdev, log_offset, PAGE_SIZE,
1667 sh->dev[sh->pd_idx].page, REQ_OP_READ, 0, false);
1668 sh->dev[sh->pd_idx].log_checksum =
1669 le32_to_cpu(payload->checksum[0]);
1670 set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
1671
1672 if (sh->qd_idx >= 0) {
1673 sync_page_io(log->rdev,
1674 r5l_ring_add(log, log_offset, BLOCK_SECTORS),
1675 PAGE_SIZE, sh->dev[sh->qd_idx].page,
1676 REQ_OP_READ, 0, false);
1677 sh->dev[sh->qd_idx].log_checksum =
1678 le32_to_cpu(payload->checksum[1]);
1679 set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
1680 }
1681 clear_bit(STRIPE_R5C_CACHING, &sh->state);
1682}
1683
1684static void r5l_recovery_reset_stripe(struct stripe_head *sh)
1685{
1686 int i;
1687
1688 sh->state = 0;
1689 sh->log_start = MaxSector;
1690 for (i = sh->disks; i--; )
1691 sh->dev[i].flags = 0;
1692}
1693
1694static void
1695r5l_recovery_replay_one_stripe(struct r5conf *conf,
1696 struct stripe_head *sh,
1697 struct r5l_recovery_ctx *ctx)
1698{
1699 struct md_rdev *rdev, *rrdev;
1700 int disk_index;
1701 int data_count = 0;
1702
1703 for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1704 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1705 continue;
1706 if (disk_index == sh->qd_idx || disk_index == sh->pd_idx)
1707 continue;
1708 data_count++;
1709 }
1710
1711 /*
1712 * stripes that only have parity must have been flushed
1713 * before the crash that we are now recovering from, so
 1714 * there is nothing more to recover.
1715 */
1716 if (data_count == 0)
1717 goto out;
1718
1719 for (disk_index = 0; disk_index < sh->disks; disk_index++) {
1720 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1721 continue;
1722
1723 /* in case device is broken */
1724 rcu_read_lock();
1725 rdev = rcu_dereference(conf->disks[disk_index].rdev);
1726 if (rdev) {
1727 atomic_inc(&rdev->nr_pending);
1728 rcu_read_unlock();
1729 sync_page_io(rdev, sh->sector, PAGE_SIZE,
1730 sh->dev[disk_index].page, REQ_OP_WRITE, 0,
1731 false);
1732 rdev_dec_pending(rdev, rdev->mddev);
1733 rcu_read_lock();
1734 }
1735 rrdev = rcu_dereference(conf->disks[disk_index].replacement);
1736 if (rrdev) {
1737 atomic_inc(&rrdev->nr_pending);
1738 rcu_read_unlock();
1739 sync_page_io(rrdev, sh->sector, PAGE_SIZE,
1740 sh->dev[disk_index].page, REQ_OP_WRITE, 0,
1741 false);
1742 rdev_dec_pending(rrdev, rrdev->mddev);
1743 rcu_read_lock();
1744 }
1745 rcu_read_unlock();
1746 }
1747 ctx->data_parity_stripes++;
1748out:
1749 r5l_recovery_reset_stripe(sh);
1750}
1751
1752static struct stripe_head *
1753r5c_recovery_alloc_stripe(struct r5conf *conf,
Song Liu3c66abb2016-12-14 15:38:01 -08001754 sector_t stripe_sect)
Song Liub4c625c2016-11-17 15:24:43 -08001755{
1756 struct stripe_head *sh;
1757
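	/*
	 * Do not block waiting for a free stripe during recovery; the
	 * caller deals with a NULL return.
	 */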
1758 sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
1759 if (!sh)
1760 return NULL; /* no more stripe available */
1761
1762 r5l_recovery_reset_stripe(sh);
Song Liub4c625c2016-11-17 15:24:43 -08001763
1764 return sh;
1765}
1766
1767static struct stripe_head *
1768r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect)
1769{
1770 struct stripe_head *sh;
1771
1772 list_for_each_entry(sh, list, lru)
1773 if (sh->sector == sect)
1774 return sh;
1775 return NULL;
1776}
1777
1778static void
1779r5c_recovery_drop_stripes(struct list_head *cached_stripe_list,
1780 struct r5l_recovery_ctx *ctx)
1781{
1782 struct stripe_head *sh, *next;
1783
1784 list_for_each_entry_safe(sh, next, cached_stripe_list, lru) {
1785 r5l_recovery_reset_stripe(sh);
1786 list_del_init(&sh->lru);
1787 raid5_release_stripe(sh);
1788 }
1789}
1790
1791static void
1792r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
1793 struct r5l_recovery_ctx *ctx)
1794{
1795 struct stripe_head *sh, *next;
1796
1797 list_for_each_entry_safe(sh, next, cached_stripe_list, lru)
1798 if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
1799 r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
1800 list_del_init(&sh->lru);
1801 raid5_release_stripe(sh);
1802 }
1803}
1804
1805/* if matches return 0; otherwise return -EINVAL */
1806static int
1807r5l_recovery_verify_data_checksum(struct r5l_log *log, struct page *page,
1808 sector_t log_offset, __le32 log_checksum)
1809{
1810 void *addr;
1811 u32 checksum;
1812
1813 sync_page_io(log->rdev, log_offset, PAGE_SIZE,
1814 page, REQ_OP_READ, 0, false);
1815 addr = kmap_atomic(page);
1816 checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
1817 kunmap_atomic(addr);
1818 return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL;
1819}
1820
1821/*
 1822 * before loading data into the stripe cache, we need to verify the checksum
 1823 * for all data; if any data page mismatches, we drop all data in the meta block
1824 */
1825static int
1826r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
1827 struct r5l_recovery_ctx *ctx)
1828{
1829 struct mddev *mddev = log->rdev->mddev;
1830 struct r5conf *conf = mddev->private;
1831 struct r5l_meta_block *mb = page_address(ctx->meta_page);
1832 sector_t mb_offset = sizeof(struct r5l_meta_block);
1833 sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
1834 struct page *page;
1835 struct r5l_payload_data_parity *payload;
1836
1837 page = alloc_page(GFP_KERNEL);
1838 if (!page)
1839 return -ENOMEM;
1840
1841 while (mb_offset < le32_to_cpu(mb->meta_size)) {
1842 payload = (void *)mb + mb_offset;
1843
1844 if (payload->header.type == R5LOG_PAYLOAD_DATA) {
1845 if (r5l_recovery_verify_data_checksum(
1846 log, page, log_offset,
1847 payload->checksum[0]) < 0)
1848 goto mismatch;
1849 } else if (payload->header.type == R5LOG_PAYLOAD_PARITY) {
1850 if (r5l_recovery_verify_data_checksum(
1851 log, page, log_offset,
1852 payload->checksum[0]) < 0)
1853 goto mismatch;
1854 if (conf->max_degraded == 2 && /* q for RAID 6 */
1855 r5l_recovery_verify_data_checksum(
1856 log, page,
1857 r5l_ring_add(log, log_offset,
1858 BLOCK_SECTORS),
1859 payload->checksum[1]) < 0)
1860 goto mismatch;
1861 } else /* not R5LOG_PAYLOAD_DATA or R5LOG_PAYLOAD_PARITY */
1862 goto mismatch;
1863
1864 log_offset = r5l_ring_add(log, log_offset,
1865 le32_to_cpu(payload->size));
1866
1867 mb_offset += sizeof(struct r5l_payload_data_parity) +
1868 sizeof(__le32) *
1869 (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
1870 }
1871
1872 put_page(page);
1873 return 0;
1874
1875mismatch:
1876 put_page(page);
1877 return -EINVAL;
1878}
1879
1880/*
1881 * Analyze all data/parity pages in one meta block
1882 * Returns:
1883 * 0 for success
 1884 * -EINVAL for unknown payload type
1885 * -EAGAIN for checksum mismatch of data page
 1886 * -ENOMEM for running out of memory (alloc_page failed or no more stripes)
1887 */
1888static int
1889r5c_recovery_analyze_meta_block(struct r5l_log *log,
1890 struct r5l_recovery_ctx *ctx,
1891 struct list_head *cached_stripe_list)
1892{
1893 struct mddev *mddev = log->rdev->mddev;
1894 struct r5conf *conf = mddev->private;
1895 struct r5l_meta_block *mb;
1896 struct r5l_payload_data_parity *payload;
1897 int mb_offset;
1898 sector_t log_offset;
1899 sector_t stripe_sect;
1900 struct stripe_head *sh;
1901 int ret;
1902
1903 /*
1904 * for mismatch in data blocks, we will drop all data in this mb, but
 1905 * we will still read the next mb for other data with the FLUSH flag, as
 1906 * io_units could finish out of order.
1907 */
1908 ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
1909 if (ret == -EINVAL)
1910 return -EAGAIN;
1911 else if (ret)
 1912		return ret; /* -ENOMEM due to alloc_page() failure */
1913
1914 mb = page_address(ctx->meta_page);
1915 mb_offset = sizeof(struct r5l_meta_block);
1916 log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
1917
1918 while (mb_offset < le32_to_cpu(mb->meta_size)) {
1919 int dd;
1920
1921 payload = (void *)mb + mb_offset;
1922 stripe_sect = (payload->header.type == R5LOG_PAYLOAD_DATA) ?
1923 raid5_compute_sector(
1924 conf, le64_to_cpu(payload->location), 0, &dd,
1925 NULL)
1926 : le64_to_cpu(payload->location);
1927
1928 sh = r5c_recovery_lookup_stripe(cached_stripe_list,
1929 stripe_sect);
1930
1931 if (!sh) {
Song Liu3c66abb2016-12-14 15:38:01 -08001932 sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
Song Liub4c625c2016-11-17 15:24:43 -08001933 /*
1934 * cannot get stripe from raid5_get_active_stripe
1935 * try replay some stripes
1936 */
1937 if (!sh) {
1938 r5c_recovery_replay_stripes(
1939 cached_stripe_list, ctx);
1940 sh = r5c_recovery_alloc_stripe(
Song Liu3c66abb2016-12-14 15:38:01 -08001941 conf, stripe_sect);
Song Liub4c625c2016-11-17 15:24:43 -08001942 }
1943 if (!sh) {
 1944				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data on journal.\n",
1945 mdname(mddev),
1946 conf->min_nr_stripes * 2);
1947 raid5_set_cache_size(mddev,
1948 conf->min_nr_stripes * 2);
Song Liu3c66abb2016-12-14 15:38:01 -08001949 sh = r5c_recovery_alloc_stripe(conf,
1950 stripe_sect);
Song Liub4c625c2016-11-17 15:24:43 -08001951 }
1952 if (!sh) {
1953 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
1954 mdname(mddev));
1955 return -ENOMEM;
1956 }
1957 list_add_tail(&sh->lru, cached_stripe_list);
1958 }
1959
1960 if (payload->header.type == R5LOG_PAYLOAD_DATA) {
Zhengyuan Liuf7b7bee2016-11-26 10:57:13 +08001961 if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
1962 test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
Song Liub4c625c2016-11-17 15:24:43 -08001963 r5l_recovery_replay_one_stripe(conf, sh, ctx);
Song Liub4c625c2016-11-17 15:24:43 -08001964 list_move_tail(&sh->lru, cached_stripe_list);
1965 }
1966 r5l_recovery_load_data(log, sh, ctx, payload,
1967 log_offset);
1968 } else if (payload->header.type == R5LOG_PAYLOAD_PARITY)
1969 r5l_recovery_load_parity(log, sh, ctx, payload,
1970 log_offset);
1971 else
1972 return -EINVAL;
1973
1974 log_offset = r5l_ring_add(log, log_offset,
1975 le32_to_cpu(payload->size));
1976
1977 mb_offset += sizeof(struct r5l_payload_data_parity) +
1978 sizeof(__le32) *
1979 (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
1980 }
1981
1982 return 0;
1983}
1984
1985/*
1986 * Load the stripe into cache. The stripe will be written out later by
1987 * the stripe cache state machine.
1988 */
1989static void r5c_recovery_load_one_stripe(struct r5l_log *log,
1990 struct stripe_head *sh)
1991{
Song Liub4c625c2016-11-17 15:24:43 -08001992 struct r5dev *dev;
1993 int i;
1994
1995 for (i = sh->disks; i--; ) {
1996 dev = sh->dev + i;
1997 if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
1998 set_bit(R5_InJournal, &dev->flags);
1999 set_bit(R5_UPTODATE, &dev->flags);
2000 }
2001 }
Song Liub4c625c2016-11-17 15:24:43 -08002002}
2003
2004/*
2005 * Scan through the log for all to-be-flushed data
2006 *
2007 * For stripes with data and parity, namely Data-Parity stripe
2008 * (STRIPE_R5C_CACHING == 0), we simply replay all the writes.
2009 *
2010 * For stripes with only data, namely Data-Only stripe
 2011 * (STRIPE_R5C_CACHING == 1), we load them into the stripe cache state machine.
2012 *
2013 * For a stripe, if we see data after parity, we should discard all previous
2014 * data and parity for this stripe, as these data are already flushed to
2015 * the array.
2016 *
2017 * At the end of the scan, we return the new journal_tail, which points to
 2018 * the first data-only stripe on the journal device, or the next invalid meta block.
2019 */
2020static int r5c_recovery_flush_log(struct r5l_log *log,
2021 struct r5l_recovery_ctx *ctx)
2022{
JackieLiubc8f1672016-11-28 16:19:20 +08002023 struct stripe_head *sh;
Song Liub4c625c2016-11-17 15:24:43 -08002024 int ret = 0;
2025
2026 /* scan through the log */
2027 while (1) {
2028 if (r5l_recovery_read_meta_block(log, ctx))
2029 break;
2030
2031 ret = r5c_recovery_analyze_meta_block(log, ctx,
2032 &ctx->cached_list);
2033 /*
 2034 * -EAGAIN means a mismatch in a data block; in this case, we still
 2035 * try to scan the next meta block
2036 */
2037 if (ret && ret != -EAGAIN)
2038 break; /* ret == -EINVAL or -ENOMEM */
2039 ctx->seq++;
2040 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2041 }
2042
2043 if (ret == -ENOMEM) {
2044 r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
2045 return ret;
2046 }
2047
2048 /* replay data-parity stripes */
2049 r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
2050
2051 /* load data-only stripes to stripe cache */
JackieLiubc8f1672016-11-28 16:19:20 +08002052 list_for_each_entry(sh, &ctx->cached_list, lru) {
Song Liub4c625c2016-11-17 15:24:43 -08002053 WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2054 r5c_recovery_load_one_stripe(log, sh);
Song Liub4c625c2016-11-17 15:24:43 -08002055 ctx->data_only_stripes++;
2056 }
2057
2058 return 0;
2059}
2060
2061/*
 2062 * We did a recovery. Now ctx.pos points to an invalid meta block. The new
 2063 * log will start here, but we can't let the superblock point to the last
 2064 * valid meta block. The log might look like:
 2065 * | meta 1| meta 2| meta 3|
 2066 * meta 1 is valid, meta 2 is invalid, and meta 3 could be valid. If the
 2067 * superblock points to meta 1 and we write a new valid meta 2n, then if a
 2068 * crash happens again, the next recovery will start from meta 1. Since
 2069 * meta 2n is valid now, that recovery would treat meta 3 as valid, which
 2070 * is wrong. The solution is to create a new meta block at meta 2 with its
 2071 * seq == meta 1's seq + 10000 and let the superblock point to it. That
 2072 * recovery will not treat meta 3 as a valid meta block, because its seq
 2073 * doesn't match
Song Liub4c625c2016-11-17 15:24:43 -08002073 */
2074
2075/*
2076 * Before recovery, the log looks like the following
2077 *
2078 * ---------------------------------------------
2079 * | valid log | invalid log |
2080 * ---------------------------------------------
2081 * ^
2082 * |- log->last_checkpoint
2083 * |- log->last_cp_seq
2084 *
2085 * Now we scan through the log until we see invalid entry
2086 *
2087 * ---------------------------------------------
2088 * | valid log | invalid log |
2089 * ---------------------------------------------
2090 * ^ ^
2091 * |- log->last_checkpoint |- ctx->pos
2092 * |- log->last_cp_seq |- ctx->seq
2093 *
 2094 * From this point, we need to increase the seq number by 10000 to avoid
 2095 * confusing the next recovery.
2096 *
2097 * ---------------------------------------------
2098 * | valid log | invalid log |
2099 * ---------------------------------------------
2100 * ^ ^
2101 * |- log->last_checkpoint |- ctx->pos+1
Song Liu3c6edc62016-12-07 09:42:06 -08002102 * |- log->last_cp_seq |- ctx->seq+10001
Song Liub4c625c2016-11-17 15:24:43 -08002103 *
 2104 * However, it is not safe to start the state machine yet, because the data
 2105 * of data only stripes is not yet secured in the RAID. To secure these data
 2106 * only stripes, we rewrite them to the journal with the increased seq.
2107 *
2108 * -----------------------------------------------------------------
2109 * | valid log | data only stripes | invalid log |
2110 * -----------------------------------------------------------------
2111 * ^ ^
2112 * |- log->last_checkpoint |- ctx->pos+n
Song Liu3c6edc62016-12-07 09:42:06 -08002113 * |- log->last_cp_seq |- ctx->seq+10000+n
Song Liub4c625c2016-11-17 15:24:43 -08002114 *
 2115 * If failure happens again during this process, the recovery can safely start
2116 * again from log->last_checkpoint.
2117 *
2118 * Once data only stripes are rewritten to journal, we move log_tail
2119 *
2120 * -----------------------------------------------------------------
2121 * | old log | data only stripes | invalid log |
2122 * -----------------------------------------------------------------
2123 * ^ ^
2124 * |- log->last_checkpoint |- ctx->pos+n
Song Liu3c6edc62016-12-07 09:42:06 -08002125 * |- log->last_cp_seq |- ctx->seq+10000+n
Song Liub4c625c2016-11-17 15:24:43 -08002126 *
2127 * Then we can safely start the state machine. If failure happens from this
2128 * point on, the recovery will start from new log->last_checkpoint.
2129 */
2130static int
2131r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2132 struct r5l_recovery_ctx *ctx)
2133{
Song Liua85dd7b2017-01-23 17:12:57 -08002134 struct stripe_head *sh;
Song Liub4c625c2016-11-17 15:24:43 -08002135 struct mddev *mddev = log->rdev->mddev;
2136 struct page *page;
Song Liu3c66abb2016-12-14 15:38:01 -08002137 sector_t next_checkpoint = MaxSector;
Song Liub4c625c2016-11-17 15:24:43 -08002138
2139 page = alloc_page(GFP_KERNEL);
2140 if (!page) {
2141 pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n",
2142 mdname(mddev));
2143 return -ENOMEM;
2144 }
2145
Song Liu3c66abb2016-12-14 15:38:01 -08002146 WARN_ON(list_empty(&ctx->cached_list));
2147
Song Liua85dd7b2017-01-23 17:12:57 -08002148 list_for_each_entry(sh, &ctx->cached_list, lru) {
Song Liub4c625c2016-11-17 15:24:43 -08002149 struct r5l_meta_block *mb;
2150 int i;
2151 int offset;
2152 sector_t write_pos;
2153
2154 WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
2155 r5l_recovery_create_empty_meta_block(log, page,
2156 ctx->pos, ctx->seq);
2157 mb = page_address(page);
2158 offset = le32_to_cpu(mb->meta_size);
JackieLiufc833c22016-11-28 16:19:19 +08002159 write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
Song Liub4c625c2016-11-17 15:24:43 -08002160
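		/*
		 * Append one data payload (and page write) for every device of
		 * this stripe whose data exists only in the journal.
		 */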
2161 for (i = sh->disks; i--; ) {
2162 struct r5dev *dev = &sh->dev[i];
2163 struct r5l_payload_data_parity *payload;
2164 void *addr;
2165
2166 if (test_bit(R5_InJournal, &dev->flags)) {
2167 payload = (void *)mb + offset;
2168 payload->header.type = cpu_to_le16(
2169 R5LOG_PAYLOAD_DATA);
 2170				payload->size = cpu_to_le32(BLOCK_SECTORS);
2171 payload->location = cpu_to_le64(
2172 raid5_compute_blocknr(sh, i, 0));
2173 addr = kmap_atomic(dev->page);
2174 payload->checksum[0] = cpu_to_le32(
2175 crc32c_le(log->uuid_checksum, addr,
2176 PAGE_SIZE));
2177 kunmap_atomic(addr);
2178 sync_page_io(log->rdev, write_pos, PAGE_SIZE,
2179 dev->page, REQ_OP_WRITE, 0, false);
2180 write_pos = r5l_ring_add(log, write_pos,
2181 BLOCK_SECTORS);
2182 offset += sizeof(__le32) +
2183 sizeof(struct r5l_payload_data_parity);
2184
2185 }
2186 }
2187 mb->meta_size = cpu_to_le32(offset);
Song Liu5c88f402016-12-07 09:42:05 -08002188 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
2189 mb, PAGE_SIZE));
Song Liub4c625c2016-11-17 15:24:43 -08002190 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
Shaohua Li20737732016-12-13 12:40:15 -08002191 REQ_OP_WRITE, REQ_FUA, false);
Song Liub4c625c2016-11-17 15:24:43 -08002192 sh->log_start = ctx->pos;
Song Liu3c66abb2016-12-14 15:38:01 -08002193 list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
2194 atomic_inc(&log->stripe_in_journal_count);
Song Liub4c625c2016-11-17 15:24:43 -08002195 ctx->pos = write_pos;
2196 ctx->seq += 1;
Song Liu3c66abb2016-12-14 15:38:01 -08002197 next_checkpoint = sh->log_start;
Song Liub4c625c2016-11-17 15:24:43 -08002198 }
Song Liu3c66abb2016-12-14 15:38:01 -08002199 log->next_checkpoint = next_checkpoint;
Song Liub4c625c2016-11-17 15:24:43 -08002200 __free_page(page);
2201 return 0;
2202}
2203
Song Liua85dd7b2017-01-23 17:12:57 -08002204static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2205 struct r5l_recovery_ctx *ctx)
2206{
2207 struct mddev *mddev = log->rdev->mddev;
2208 struct r5conf *conf = mddev->private;
2209 struct stripe_head *sh, *next;
2210
2211 if (ctx->data_only_stripes == 0)
2212 return;
2213
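	/*
	 * Temporarily switch to write-back mode so the data-only stripes
	 * loaded above go through the normal caching state machine while
	 * they are flushed to the RAID disks.
	 */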
2214 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2215
2216 list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
2217 r5c_make_stripe_write_out(sh);
2218 set_bit(STRIPE_HANDLE, &sh->state);
2219 list_del_init(&sh->lru);
2220 raid5_release_stripe(sh);
2221 }
2222
2223 md_wakeup_thread(conf->mddev->thread);
2224 /* reuse conf->wait_for_quiescent in recovery */
2225 wait_event(conf->wait_for_quiescent,
2226 atomic_read(&conf->active_stripes) == 0);
2227
2228 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2229}
2230
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002231static int r5l_recovery_log(struct r5l_log *log)
2232{
Song Liu5aabf7c2016-11-17 15:24:44 -08002233 struct mddev *mddev = log->rdev->mddev;
Shaohua Li355810d2015-08-13 14:32:01 -07002234 struct r5l_recovery_ctx ctx;
Song Liu5aabf7c2016-11-17 15:24:44 -08002235 int ret;
JackieLiu43b96742016-12-05 11:58:53 +08002236 sector_t pos;
Shaohua Li355810d2015-08-13 14:32:01 -07002237
2238 ctx.pos = log->last_checkpoint;
2239 ctx.seq = log->last_cp_seq;
2240 ctx.meta_page = alloc_page(GFP_KERNEL);
Song Liub4c625c2016-11-17 15:24:43 -08002241 ctx.data_only_stripes = 0;
2242 ctx.data_parity_stripes = 0;
2243 INIT_LIST_HEAD(&ctx.cached_list);
2244
Shaohua Li355810d2015-08-13 14:32:01 -07002245 if (!ctx.meta_page)
2246 return -ENOMEM;
2247
Song Liu5aabf7c2016-11-17 15:24:44 -08002248 ret = r5c_recovery_flush_log(log, &ctx);
Shaohua Li355810d2015-08-13 14:32:01 -07002249 __free_page(ctx.meta_page);
2250
Song Liu5aabf7c2016-11-17 15:24:44 -08002251 if (ret)
2252 return ret;
Shaohua Li355810d2015-08-13 14:32:01 -07002253
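	/*
	 * Bump seq well past anything already in the log so that stale meta
	 * blocks beyond ctx.pos can never be mistaken for valid ones by a
	 * later recovery (see the comment above
	 * r5c_recovery_rewrite_data_only_stripes()).
	 */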
Song Liu3c6edc62016-12-07 09:42:06 -08002254 pos = ctx.pos;
2255 ctx.seq += 10000;
JackieLiu43b96742016-12-05 11:58:53 +08002256
JackieLiu43b96742016-12-05 11:58:53 +08002257
Song Liu5aabf7c2016-11-17 15:24:44 -08002258 if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
2259 pr_debug("md/raid:%s: starting from clean shutdown\n",
2260 mdname(mddev));
Song Liua85dd7b2017-01-23 17:12:57 -08002261 else
Colin Ian King99f17892016-12-23 00:52:30 +00002262 pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
Song Liu5aabf7c2016-11-17 15:24:44 -08002263 mdname(mddev), ctx.data_only_stripes,
2264 ctx.data_parity_stripes);
2265
Song Liua85dd7b2017-01-23 17:12:57 -08002266 if (ctx.data_only_stripes == 0) {
2267 log->next_checkpoint = ctx.pos;
2268 r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
2269 ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
2270 } else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
2271 pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
2272 mdname(mddev));
2273 return -EIO;
Shaohua Li355810d2015-08-13 14:32:01 -07002274 }
Song Liub4c625c2016-11-17 15:24:43 -08002275
Song Liu5aabf7c2016-11-17 15:24:44 -08002276 log->log_start = ctx.pos;
Song Liu5aabf7c2016-11-17 15:24:44 -08002277 log->seq = ctx.seq;
JackieLiu43b96742016-12-05 11:58:53 +08002278 log->last_checkpoint = pos;
2279 r5l_write_super(log, pos);
Song Liua85dd7b2017-01-23 17:12:57 -08002280
2281 r5c_recovery_flush_data_only_stripes(log, &ctx);
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002282 return 0;
2283}
2284
2285static void r5l_write_super(struct r5l_log *log, sector_t cp)
2286{
2287 struct mddev *mddev = log->rdev->mddev;
2288
2289 log->rdev->journal_tail = cp;
Shaohua Li29530792016-12-08 15:48:19 -08002290 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002291}
2292
Song Liu2c7da142016-11-17 15:24:41 -08002293static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
2294{
2295 struct r5conf *conf = mddev->private;
2296 int ret;
2297
2298 if (!conf->log)
2299 return 0;
2300
2301 switch (conf->log->r5c_journal_mode) {
2302 case R5C_JOURNAL_MODE_WRITE_THROUGH:
2303 ret = snprintf(
2304 page, PAGE_SIZE, "[%s] %s\n",
2305 r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
2306 r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
2307 break;
2308 case R5C_JOURNAL_MODE_WRITE_BACK:
2309 ret = snprintf(
2310 page, PAGE_SIZE, "%s [%s]\n",
2311 r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH],
2312 r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]);
2313 break;
2314 default:
2315 ret = 0;
2316 }
2317 return ret;
2318}
2319
Heinz Mauelshagen78e470c2017-03-22 17:44:37 +01002320/*
2321 * Set journal cache mode on @mddev (external API initially needed by dm-raid).
2322 *
2323 * @mode as defined in 'enum r5c_journal_mode'.
2324 *
2325 */
2326int r5c_journal_mode_set(struct mddev *mddev, int mode)
Song Liu2c7da142016-11-17 15:24:41 -08002327{
2328 struct r5conf *conf = mddev->private;
2329 struct r5l_log *log = conf->log;
Song Liu2c7da142016-11-17 15:24:41 -08002330
2331 if (!log)
2332 return -ENODEV;
2333
Heinz Mauelshagen78e470c2017-03-22 17:44:37 +01002334 if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
2335 mode > R5C_JOURNAL_MODE_WRITE_BACK)
Song Liu2c7da142016-11-17 15:24:41 -08002336 return -EINVAL;
2337
Song Liu2e38a372017-01-24 10:45:30 -08002338 if (raid5_calc_degraded(conf) > 0 &&
Heinz Mauelshagen78e470c2017-03-22 17:44:37 +01002339 mode == R5C_JOURNAL_MODE_WRITE_BACK)
Song Liu2e38a372017-01-24 10:45:30 -08002340 return -EINVAL;
2341
Song Liu2c7da142016-11-17 15:24:41 -08002342 mddev_suspend(mddev);
Heinz Mauelshagen78e470c2017-03-22 17:44:37 +01002343 conf->log->r5c_journal_mode = mode;
Song Liu2c7da142016-11-17 15:24:41 -08002344 mddev_resume(mddev);
2345
2346 pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
Heinz Mauelshagen78e470c2017-03-22 17:44:37 +01002347 mdname(mddev), mode, r5c_journal_mode_str[mode]);
2348 return 0;
2349}
2350EXPORT_SYMBOL(r5c_journal_mode_set);
2351
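/*
 * Parse a journal mode name written to the "journal_mode" md sysfs attribute
 * and apply it via r5c_journal_mode_set(). For example (path illustrative,
 * not taken from this file):
 *   echo write-back > /sys/block/md0/md/journal_mode
 */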
2352static ssize_t r5c_journal_mode_store(struct mddev *mddev,
2353 const char *page, size_t length)
2354{
2355 int mode = ARRAY_SIZE(r5c_journal_mode_str);
2356 size_t len = length;
2357
2358 if (len < 2)
2359 return -EINVAL;
2360
2361 if (page[len - 1] == '\n')
2362 len--;
2363
2364 while (mode--)
2365 if (strlen(r5c_journal_mode_str[mode]) == len &&
2366 !strncmp(page, r5c_journal_mode_str[mode], len))
2367 break;
2368
2369 return r5c_journal_mode_set(mddev, mode) ?: length;
Song Liu2c7da142016-11-17 15:24:41 -08002370}
2371
2372struct md_sysfs_entry
2373r5c_journal_mode = __ATTR(journal_mode, 0644,
2374 r5c_journal_mode_show, r5c_journal_mode_store);
2375
Song Liu2ded3702016-11-17 15:24:38 -08002376/*
 2377 * Try to handle a write operation in the caching phase. This function should
 2378 * only be called in write-back mode.
 2379 *
 2380 * If all outstanding writes can be handled in the caching phase, return 0.
 2381 * If the writes require the write-out phase, call r5c_make_stripe_write_out()
 2382 * and return -EAGAIN.
2383 */
2384int r5c_try_caching_write(struct r5conf *conf,
2385 struct stripe_head *sh,
2386 struct stripe_head_state *s,
2387 int disks)
2388{
2389 struct r5l_log *log = conf->log;
Song Liu1e6d6902016-11-17 15:24:39 -08002390 int i;
2391 struct r5dev *dev;
2392 int to_cache = 0;
Song Liu03b047f2017-01-11 13:39:14 -08002393 void **pslot;
2394 sector_t tree_index;
2395 int ret;
2396 uintptr_t refcount;
Song Liu2ded3702016-11-17 15:24:38 -08002397
2398 BUG_ON(!r5c_is_writeback(log));
2399
Song Liu1e6d6902016-11-17 15:24:39 -08002400 if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) {
2401 /*
2402 * There are two different scenarios here:
2403 * 1. The stripe has some data cached, and it is sent to
2404 * write-out phase for reclaim
2405 * 2. The stripe is clean, and this is the first write
2406 *
2407 * For 1, return -EAGAIN, so we continue with
2408 * handle_stripe_dirtying().
2409 *
2410 * For 2, set STRIPE_R5C_CACHING and continue with caching
2411 * write.
2412 */
2413
2414 /* case 1: anything injournal or anything in written */
2415 if (s->injournal > 0 || s->written > 0)
2416 return -EAGAIN;
2417 /* case 2 */
2418 set_bit(STRIPE_R5C_CACHING, &sh->state);
2419 }
2420
Song Liu2e38a372017-01-24 10:45:30 -08002421 /*
 2422 * When running in degraded mode, the array is set to write-through mode.
 2423 * This check helps drain pending writes safely in the transition to
2424 * write-through mode.
2425 */
2426 if (s->failed) {
2427 r5c_make_stripe_write_out(sh);
2428 return -EAGAIN;
2429 }
2430
Song Liu1e6d6902016-11-17 15:24:39 -08002431 for (i = disks; i--; ) {
2432 dev = &sh->dev[i];
2433 /* if non-overwrite, use writing-out phase */
2434 if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
2435 !test_bit(R5_InJournal, &dev->flags)) {
2436 r5c_make_stripe_write_out(sh);
2437 return -EAGAIN;
2438 }
2439 }
2440
Song Liu03b047f2017-01-11 13:39:14 -08002441 /* if the stripe is not counted in big_stripe_tree, add it now */
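	/*
	 * Each radix tree slot stores a plain refcount shifted left by
	 * R5C_RADIX_COUNT_SHIFT, so no per-stripe allocation is needed.
	 */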
2442 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) &&
2443 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2444 tree_index = r5c_tree_index(conf, sh->sector);
2445 spin_lock(&log->tree_lock);
2446 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2447 tree_index);
2448 if (pslot) {
2449 refcount = (uintptr_t)radix_tree_deref_slot_protected(
2450 pslot, &log->tree_lock) >>
2451 R5C_RADIX_COUNT_SHIFT;
2452 radix_tree_replace_slot(
2453 &log->big_stripe_tree, pslot,
2454 (void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT));
2455 } else {
2456 /*
2457 * this radix_tree_insert can fail safely, so no
2458 * need to call radix_tree_preload()
2459 */
2460 ret = radix_tree_insert(
2461 &log->big_stripe_tree, tree_index,
2462 (void *)(1 << R5C_RADIX_COUNT_SHIFT));
2463 if (ret) {
2464 spin_unlock(&log->tree_lock);
2465 r5c_make_stripe_write_out(sh);
2466 return -EAGAIN;
2467 }
2468 }
2469 spin_unlock(&log->tree_lock);
2470
2471 /*
2472 * set STRIPE_R5C_PARTIAL_STRIPE, this shows the stripe is
2473 * counted in the radix tree
2474 */
2475 set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state);
2476 atomic_inc(&conf->r5c_cached_partial_stripes);
2477 }
2478
Song Liu1e6d6902016-11-17 15:24:39 -08002479 for (i = disks; i--; ) {
2480 dev = &sh->dev[i];
2481 if (dev->towrite) {
2482 set_bit(R5_Wantwrite, &dev->flags);
2483 set_bit(R5_Wantdrain, &dev->flags);
2484 set_bit(R5_LOCKED, &dev->flags);
2485 to_cache++;
2486 }
2487 }
2488
2489 if (to_cache) {
2490 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2491 /*
2492 * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data()
2493 * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in
2494 * r5c_handle_data_cached()
2495 */
2496 set_bit(STRIPE_LOG_TRAPPED, &sh->state);
2497 }
2498
2499 return 0;
2500}
2501
2502/*
2503 * free extra pages (orig_page) we allocated for prexor
2504 */
2505void r5c_release_extra_page(struct stripe_head *sh)
2506{
Song Liud7bd3982016-11-23 22:50:39 -08002507 struct r5conf *conf = sh->raid_conf;
Song Liu1e6d6902016-11-17 15:24:39 -08002508 int i;
Song Liud7bd3982016-11-23 22:50:39 -08002509 bool using_disk_info_extra_page;
2510
2511 using_disk_info_extra_page =
2512 sh->dev[0].orig_page == conf->disks[0].extra_page;
Song Liu1e6d6902016-11-17 15:24:39 -08002513
2514 for (i = sh->disks; i--; )
2515 if (sh->dev[i].page != sh->dev[i].orig_page) {
2516 struct page *p = sh->dev[i].orig_page;
2517
2518 sh->dev[i].orig_page = sh->dev[i].page;
Song Liu86aa1392017-01-12 17:22:41 -08002519 clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2520
Song Liud7bd3982016-11-23 22:50:39 -08002521 if (!using_disk_info_extra_page)
2522 put_page(p);
Song Liu1e6d6902016-11-17 15:24:39 -08002523 }
Song Liud7bd3982016-11-23 22:50:39 -08002524
2525 if (using_disk_info_extra_page) {
2526 clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state);
2527 md_wakeup_thread(conf->mddev->thread);
2528 }
2529}
2530
2531void r5c_use_extra_page(struct stripe_head *sh)
2532{
2533 struct r5conf *conf = sh->raid_conf;
2534 int i;
2535 struct r5dev *dev;
2536
2537 for (i = sh->disks; i--; ) {
2538 dev = &sh->dev[i];
2539 if (dev->orig_page != dev->page)
2540 put_page(dev->orig_page);
2541 dev->orig_page = conf->disks[i].extra_page;
2542 }
Song Liu2ded3702016-11-17 15:24:38 -08002543}
2544
2545/*
2546 * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
2547 * stripe is committed to RAID disks.
2548 */
2549void r5c_finish_stripe_write_out(struct r5conf *conf,
2550 struct stripe_head *sh,
2551 struct stripe_head_state *s)
2552{
Song Liu03b047f2017-01-11 13:39:14 -08002553 struct r5l_log *log = conf->log;
Song Liu1e6d6902016-11-17 15:24:39 -08002554 int i;
2555 int do_wakeup = 0;
Song Liu03b047f2017-01-11 13:39:14 -08002556 sector_t tree_index;
2557 void **pslot;
2558 uintptr_t refcount;
Song Liu1e6d6902016-11-17 15:24:39 -08002559
Song Liu03b047f2017-01-11 13:39:14 -08002560 if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
Song Liu2ded3702016-11-17 15:24:38 -08002561 return;
2562
2563 WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
2564 clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
2565
Song Liu03b047f2017-01-11 13:39:14 -08002566 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
Song Liu2ded3702016-11-17 15:24:38 -08002567 return;
Song Liu1e6d6902016-11-17 15:24:39 -08002568
2569 for (i = sh->disks; i--; ) {
2570 clear_bit(R5_InJournal, &sh->dev[i].flags);
2571 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2572 do_wakeup = 1;
2573 }
2574
2575 /*
 2576 * analyse_stripe() runs before r5c_finish_stripe_write_out().
2577 * We updated R5_InJournal, so we also update s->injournal.
2578 */
2579 s->injournal = 0;
2580
2581 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2582 if (atomic_dec_and_test(&conf->pending_full_writes))
2583 md_wakeup_thread(conf->mddev->thread);
2584
2585 if (do_wakeup)
2586 wake_up(&conf->wait_for_overlap);
Song Liua39f7af2016-11-17 15:24:40 -08002587
Song Liu03b047f2017-01-11 13:39:14 -08002588 spin_lock_irq(&log->stripe_in_journal_lock);
Song Liua39f7af2016-11-17 15:24:40 -08002589 list_del_init(&sh->r5c);
Song Liu03b047f2017-01-11 13:39:14 -08002590 spin_unlock_irq(&log->stripe_in_journal_lock);
Song Liua39f7af2016-11-17 15:24:40 -08002591 sh->log_start = MaxSector;
Song Liu03b047f2017-01-11 13:39:14 -08002592
2593 atomic_dec(&log->stripe_in_journal_count);
2594 r5c_update_log_state(log);
2595
2596 /* stop counting this stripe in big_stripe_tree */
2597 if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) ||
2598 test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2599 tree_index = r5c_tree_index(conf, sh->sector);
2600 spin_lock(&log->tree_lock);
2601 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2602 tree_index);
2603 BUG_ON(pslot == NULL);
2604 refcount = (uintptr_t)radix_tree_deref_slot_protected(
2605 pslot, &log->tree_lock) >>
2606 R5C_RADIX_COUNT_SHIFT;
2607 if (refcount == 1)
2608 radix_tree_delete(&log->big_stripe_tree, tree_index);
2609 else
2610 radix_tree_replace_slot(
2611 &log->big_stripe_tree, pslot,
2612 (void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT));
2613 spin_unlock(&log->tree_lock);
2614 }
2615
2616 if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) {
2617 BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0);
Shaohua Lie33fbb92017-02-10 16:18:09 -08002618 atomic_dec(&conf->r5c_flushing_partial_stripes);
Song Liu03b047f2017-01-11 13:39:14 -08002619 atomic_dec(&conf->r5c_cached_partial_stripes);
2620 }
2621
2622 if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) {
2623 BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0);
Shaohua Lie33fbb92017-02-10 16:18:09 -08002624 atomic_dec(&conf->r5c_flushing_full_stripes);
Song Liu03b047f2017-01-11 13:39:14 -08002625 atomic_dec(&conf->r5c_cached_full_stripes);
2626 }
Song Liu1e6d6902016-11-17 15:24:39 -08002627}
2628
2629int
2630r5c_cache_data(struct r5l_log *log, struct stripe_head *sh,
2631 struct stripe_head_state *s)
2632{
Song Liua39f7af2016-11-17 15:24:40 -08002633 struct r5conf *conf = sh->raid_conf;
Song Liu1e6d6902016-11-17 15:24:39 -08002634 int pages = 0;
2635 int reserve;
2636 int i;
2637 int ret = 0;
2638
2639 BUG_ON(!log);
2640
2641 for (i = 0; i < sh->disks; i++) {
2642 void *addr;
2643
2644 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
2645 continue;
2646 addr = kmap_atomic(sh->dev[i].page);
2647 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
2648 addr, PAGE_SIZE);
2649 kunmap_atomic(addr);
2650 pages++;
2651 }
2652 WARN_ON(pages == 0);
2653
2654 /*
 2655 * The stripe must enter the state machine again to call endio, so
2656 * don't delay.
2657 */
2658 clear_bit(STRIPE_DELAYED, &sh->state);
2659 atomic_inc(&sh->count);
2660
2661 mutex_lock(&log->io_mutex);
2662 /* meta + data */
2663 reserve = (1 + pages) << (PAGE_SHIFT - 9);
Song Liu1e6d6902016-11-17 15:24:39 -08002664
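	/*
	 * Under critical log space pressure, stripes that do not already
	 * have journal space (log_start == MaxSector) are parked on the
	 * no_space list until reclaim frees space.
	 */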
Song Liua39f7af2016-11-17 15:24:40 -08002665 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
2666 sh->log_start == MaxSector)
2667 r5l_add_no_space_stripe(log, sh);
2668 else if (!r5l_has_free_space(log, reserve)) {
2669 if (sh->log_start == log->last_checkpoint)
2670 BUG();
2671 else
2672 r5l_add_no_space_stripe(log, sh);
Song Liu1e6d6902016-11-17 15:24:39 -08002673 } else {
2674 ret = r5l_log_stripe(log, sh, pages, 0);
2675 if (ret) {
2676 spin_lock_irq(&log->io_list_lock);
2677 list_add_tail(&sh->log_list, &log->no_mem_stripes);
2678 spin_unlock_irq(&log->io_list_lock);
2679 }
2680 }
2681
2682 mutex_unlock(&log->io_mutex);
2683 return 0;
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002684}
2685
Song Liu03b047f2017-01-11 13:39:14 -08002686/* check whether this big stripe is in write back cache. */
2687bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect)
2688{
2689 struct r5l_log *log = conf->log;
2690 sector_t tree_index;
2691 void *slot;
2692
2693 if (!log)
2694 return false;
2695
2696 WARN_ON_ONCE(!rcu_read_lock_held());
2697 tree_index = r5c_tree_index(conf, sect);
2698 slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
2699 return slot != NULL;
2700}
2701
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002702static int r5l_load_log(struct r5l_log *log)
2703{
2704 struct md_rdev *rdev = log->rdev;
2705 struct page *page;
2706 struct r5l_meta_block *mb;
2707 sector_t cp = log->rdev->journal_tail;
2708 u32 stored_crc, expected_crc;
2709 bool create_super = false;
JackieLiud30dfeb2016-12-08 08:47:39 +08002710 int ret = 0;
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002711
2712 /* Make sure it's valid */
2713 if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
2714 cp = 0;
2715 page = alloc_page(GFP_KERNEL);
2716 if (!page)
2717 return -ENOMEM;
2718
Mike Christie796a5cf2016-06-05 14:32:07 -05002719 if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) {
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002720 ret = -EIO;
2721 goto ioerr;
2722 }
2723 mb = page_address(page);
2724
2725 if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
2726 mb->version != R5LOG_VERSION) {
2727 create_super = true;
2728 goto create;
2729 }
2730 stored_crc = le32_to_cpu(mb->checksum);
2731 mb->checksum = 0;
Shaohua Li5cb2fbd2015-10-28 08:41:25 -07002732 expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002733 if (stored_crc != expected_crc) {
2734 create_super = true;
2735 goto create;
2736 }
2737 if (le64_to_cpu(mb->position) != cp) {
2738 create_super = true;
2739 goto create;
2740 }
2741create:
2742 if (create_super) {
2743 log->last_cp_seq = prandom_u32();
2744 cp = 0;
Zhengyuan Liu56056c22016-10-24 16:15:59 +08002745 r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002746 /*
 2747 * Make sure the superblock points to the correct address. The log might
 2748 * have data very soon. If the superblock doesn't have the correct log tail
 2749 * address, recovery can't find the log
2750 */
2751 r5l_write_super(log, cp);
2752 } else
2753 log->last_cp_seq = le64_to_cpu(mb->seq);
2754
2755 log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
Shaohua Li0576b1c2015-08-13 14:32:00 -07002756 log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
2757 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
2758 log->max_free_space = RECLAIM_MAX_FREE_SPACE;
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002759 log->last_checkpoint = cp;
2760
2761 __free_page(page);
2762
JackieLiud30dfeb2016-12-08 08:47:39 +08002763 if (create_super) {
2764 log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
2765 log->seq = log->last_cp_seq + 1;
2766 log->next_checkpoint = cp;
2767 } else
2768 ret = r5l_recovery_log(log);
2769
Zhengyuan Liu3d7e7e12016-12-04 16:49:44 +08002770 r5c_update_log_state(log);
2771 return ret;
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002772ioerr:
2773 __free_page(page);
2774 return ret;
2775}
2776
Song Liu2e38a372017-01-24 10:45:30 -08002777void r5c_update_on_rdev_error(struct mddev *mddev)
2778{
2779 struct r5conf *conf = mddev->private;
2780 struct r5l_log *log = conf->log;
2781
2782 if (!log)
2783 return;
2784
2785 if (raid5_calc_degraded(conf) > 0 &&
2786 conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
2787 schedule_work(&log->disable_writeback_work);
2788}
2789
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002790int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
2791{
Jens Axboec888a8f2016-04-13 13:33:19 -06002792 struct request_queue *q = bdev_get_queue(rdev->bdev);
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002793 struct r5l_log *log;
2794
2795 if (PAGE_SIZE != 4096)
2796 return -EINVAL;
Song Liuc757ec92016-11-17 15:24:36 -08002797
2798 /*
2799 * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and
2800 * raid_disks r5l_payload_data_parity.
2801 *
 2802 * Write journal and cache do not work for very big arrays
2803 * (raid_disks > 203)
2804 */
2805 if (sizeof(struct r5l_meta_block) +
2806 ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
2807 conf->raid_disks) > PAGE_SIZE) {
2808 pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n",
2809 mdname(conf->mddev), conf->raid_disks);
2810 return -EINVAL;
2811 }
2812
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002813 log = kzalloc(sizeof(*log), GFP_KERNEL);
2814 if (!log)
2815 return -ENOMEM;
2816 log->rdev = rdev;
2817
Jens Axboec888a8f2016-04-13 13:33:19 -06002818 log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
Christoph Hellwig56fef7c2015-10-05 09:31:09 +02002819
Shaohua Li5cb2fbd2015-10-28 08:41:25 -07002820 log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
2821 sizeof(rdev->mddev->uuid));
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002822
2823 mutex_init(&log->io_mutex);
2824
2825 spin_lock_init(&log->io_list_lock);
2826 INIT_LIST_HEAD(&log->running_ios);
Shaohua Li0576b1c2015-08-13 14:32:00 -07002827 INIT_LIST_HEAD(&log->io_end_ios);
Shaohua Lia8c34f92015-09-02 13:49:46 -07002828 INIT_LIST_HEAD(&log->flushing_ios);
Christoph Hellwig04732f72015-10-05 09:31:07 +02002829 INIT_LIST_HEAD(&log->finished_ios);
Ming Lei3a83f462016-11-22 08:57:21 -07002830 bio_init(&log->flush_bio, NULL, 0);
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002831
2832 log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
2833 if (!log->io_kc)
2834 goto io_kc;
2835
Christoph Hellwig5036c3902015-12-21 10:51:02 +11002836 log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
2837 if (!log->io_pool)
2838 goto io_pool;
2839
Christoph Hellwigc38d29b2015-12-21 10:51:02 +11002840 log->bs = bioset_create(R5L_POOL_SIZE, 0);
2841 if (!log->bs)
2842 goto io_bs;
2843
Christoph Hellwige8deb632015-12-21 10:51:02 +11002844 log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
2845 if (!log->meta_pool)
2846 goto out_mempool;
2847
Song Liu03b047f2017-01-11 13:39:14 -08002848 spin_lock_init(&log->tree_lock);
2849 INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
2850
Shaohua Li0576b1c2015-08-13 14:32:00 -07002851 log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
2852 log->rdev->mddev, "reclaim");
2853 if (!log->reclaim_thread)
2854 goto reclaim_thread;
Song Liua39f7af2016-11-17 15:24:40 -08002855 log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL;
2856
Shaohua Li0fd22b42015-09-02 13:49:47 -07002857 init_waitqueue_head(&log->iounit_wait);
Shaohua Li0576b1c2015-08-13 14:32:00 -07002858
Christoph Hellwig5036c3902015-12-21 10:51:02 +11002859 INIT_LIST_HEAD(&log->no_mem_stripes);
2860
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002861 INIT_LIST_HEAD(&log->no_space_stripes);
2862 spin_lock_init(&log->no_space_stripes_lock);
2863
Song Liu3bddb7f2016-11-18 16:46:50 -08002864 INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
Song Liu2e38a372017-01-24 10:45:30 -08002865 INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
Song Liu3bddb7f2016-11-18 16:46:50 -08002866
Song Liu2ded3702016-11-17 15:24:38 -08002867 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
Song Liua39f7af2016-11-17 15:24:40 -08002868 INIT_LIST_HEAD(&log->stripe_in_journal_list);
2869 spin_lock_init(&log->stripe_in_journal_lock);
2870 atomic_set(&log->stripe_in_journal_count, 0);
Song Liu2ded3702016-11-17 15:24:38 -08002871
Song Liud2250f12016-12-14 15:38:02 -08002872 rcu_assign_pointer(conf->log, log);
2873
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002874 if (r5l_load_log(log))
2875 goto error;
2876
Shaohua Lia62ab492016-01-06 14:37:13 -08002877 set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002878 return 0;
Christoph Hellwige8deb632015-12-21 10:51:02 +11002879
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002880error:
Song Liud2250f12016-12-14 15:38:02 -08002881 rcu_assign_pointer(conf->log, NULL);
Shaohua Li0576b1c2015-08-13 14:32:00 -07002882 md_unregister_thread(&log->reclaim_thread);
2883reclaim_thread:
Christoph Hellwige8deb632015-12-21 10:51:02 +11002884 mempool_destroy(log->meta_pool);
2885out_mempool:
Christoph Hellwigc38d29b2015-12-21 10:51:02 +11002886 bioset_free(log->bs);
2887io_bs:
Christoph Hellwig5036c3902015-12-21 10:51:02 +11002888 mempool_destroy(log->io_pool);
2889io_pool:
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002890 kmem_cache_destroy(log->io_kc);
2891io_kc:
2892 kfree(log);
2893 return -EINVAL;
2894}
2895
2896void r5l_exit_log(struct r5l_log *log)
2897{
Song Liu2e38a372017-01-24 10:45:30 -08002898 flush_work(&log->disable_writeback_work);
Shaohua Li0576b1c2015-08-13 14:32:00 -07002899 md_unregister_thread(&log->reclaim_thread);
Christoph Hellwige8deb632015-12-21 10:51:02 +11002900 mempool_destroy(log->meta_pool);
Christoph Hellwigc38d29b2015-12-21 10:51:02 +11002901 bioset_free(log->bs);
Christoph Hellwig5036c3902015-12-21 10:51:02 +11002902 mempool_destroy(log->io_pool);
Shaohua Lif6bed0e2015-08-13 14:31:59 -07002903 kmem_cache_destroy(log->io_kc);
2904 kfree(log);
2905}