/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include "md.h"
#include "raid5.h"

/*
 * metadata/data are stored on disk in 4k-sized units (blocks), regardless of
 * the underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)

/*
 * reclaim runs every 1/4 of the disk size or when 10G of space becomes
 * reclaimable, whichever is smaller. This prevents recovery from having to
 * scan a very long log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

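/*
 * Illustrative sketch only (not used by the driver): how the two limits above
 * combine into max_free_space, mirroring the computation done later in
 * r5l_load_log(). For a 16G log device the 1/4 rule gives 4G; for a 100G
 * device the 10G cap applies instead.
 */
static inline sector_t r5l_example_max_free_space(sector_t device_size)
{
        sector_t space = device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;

        /* cap at 10G worth of sectors */
        return min_t(sector_t, space, (sector_t)RECLAIM_MAX_FREE_SPACE);
}
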
struct r5l_log {
        struct md_rdev *rdev;

        u32 uuid_checksum;

        sector_t device_size;           /* log device size, rounded down to
                                         * a multiple of BLOCK_SECTORS */
        sector_t max_free_space;        /* reclaim runs once reclaimable
                                         * space reaches this size */

        sector_t last_checkpoint;       /* log tail. where recovery scan
                                         * starts from */
        u64 last_cp_seq;                /* log tail sequence */

        sector_t log_start;             /* log head. where new data appends */
        u64 seq;                        /* log head sequence */

        sector_t next_checkpoint;
        u64 next_cp_seq;

        struct mutex io_mutex;
        struct r5l_io_unit *current_io; /* current io_unit accepting new data */

        spinlock_t io_list_lock;
        struct list_head running_ios;   /* io_units which are still running,
                                         * and have not yet been completely
                                         * written to the log */
        struct list_head io_end_ios;    /* io_units which have been completely
                                         * written to the log but not yet
                                         * written to the RAID */
        struct list_head flushing_ios;  /* io_units which are waiting for a
                                         * log cache flush */
        struct list_head finished_ios;  /* io_units which have settled down in
                                         * the log disk */
        struct bio flush_bio;

        struct kmem_cache *io_kc;

        struct md_thread *reclaim_thread;
        unsigned long reclaim_target;   /* amount of space that needs to be
                                         * reclaimed. if it's 0, reclaim the
                                         * space used by io_units which are in
                                         * the IO_UNIT_STRIPE_END state (i.e.,
                                         * reclaim doesn't wait for a specific
                                         * io_unit to switch to the
                                         * IO_UNIT_STRIPE_END state) */
        wait_queue_head_t iounit_wait;

        struct list_head no_space_stripes; /* pending stripes, log has no space */
        spinlock_t no_space_stripes_lock;

        bool need_cache_flush;
};

/*
 * An IO range starts at a meta data block and ends at the next meta data
 * block. The io_unit's meta data block tracks the data/parity that follows
 * it. The io_unit is written to the log disk with a normal write; since we
 * always flush the log disk first and only then start moving data to the
 * raid disks, there is no need to write the io_unit with FLUSH/FUA.
 */
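/*
 * Illustrative on-disk layout of a single io_unit (a sketch; the number of
 * data/parity pages varies per stripe, and several stripes can share one
 * meta block):
 *
 *      +------------+-----------+-----+-------------+------ ...
 *      | meta block | data page | ... | parity page | next meta block
 *      |    (4k)    |    (4k)   |     |     (4k)    |
 *      +------------+-----------+-----+-------------+------ ...
 */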
struct r5l_io_unit {
        struct r5l_log *log;

        struct page *meta_page; /* store meta block */
        int meta_offset;        /* current offset in meta_page */

        struct bio_list bios;
        atomic_t pending_io;    /* pending bios not written to log yet */
        struct bio *current_bio;/* current_bio accepting new data */

        atomic_t pending_stripe;/* how many stripes not flushed to raid */
        u64 seq;                /* seq number of the metablock */
        sector_t log_start;     /* where the io_unit starts */
        sector_t log_end;       /* where the io_unit ends */
        struct list_head log_sibling; /* log->running_ios */
        struct list_head stripe_list; /* stripes added to the io_unit */

        int state;
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
        IO_UNIT_RUNNING = 0,    /* accepting new IO */
        IO_UNIT_IO_START = 1,   /* io_unit bio started writing to log,
                                 * no longer accepting new bios */
        IO_UNIT_IO_END = 2,     /* io_unit bio finished writing to log */
        IO_UNIT_STRIPE_END = 3, /* stripe data finished writing to raid */
};

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
        start += inc;
        if (start >= log->device_size)
                start = start - log->device_size;
        return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
                                  sector_t end)
{
        if (end >= start)
                return end - start;
        else
                return end + log->device_size - start;
}

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
        sector_t used_size;

        used_size = r5l_ring_distance(log, log->last_checkpoint,
                                      log->log_start);

        return log->device_size > used_size + size;
}

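/*
 * Illustrative sketch only (not called by the driver): the ring helpers above
 * in action. Advancing the head by one block wraps modulo device_size, and
 * the tail-to-head distance is what r5l_has_free_space() measures against.
 */
static inline sector_t r5l_example_ring_usage(struct r5l_log *log)
{
        /* advance the head by one 4k block, wrapping at device_size */
        sector_t head = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

        /* sectors used between the log tail and the advanced head */
        return r5l_ring_distance(log, log->last_checkpoint, head);
}
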
static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
{
        __free_page(io->meta_page);
        kmem_cache_free(log->io_kc, io);
}

static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
                                  enum r5l_io_unit_state state)
{
        struct r5l_io_unit *io;

        while (!list_empty(from)) {
                io = list_first_entry(from, struct r5l_io_unit, log_sibling);
                /* don't change list order */
                if (io->state >= state)
                        list_move_tail(&io->log_sibling, to);
                else
                        break;
        }
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
                                    enum r5l_io_unit_state state)
{
        if (WARN_ON(io->state >= state))
                return;
        io->state = state;
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
        struct stripe_head *sh, *next;

        list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
                list_del_init(&sh->log_list);
                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
}

/* XXX: totally ignores I/O errors */
static void r5l_log_run_stripes(struct r5l_log *log)
{
        struct r5l_io_unit *io, *next;

        assert_spin_locked(&log->io_list_lock);

        list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
                /* don't change list order */
                if (io->state < IO_UNIT_IO_END)
                        break;

                list_move_tail(&io->log_sibling, &log->finished_ios);
                r5l_io_run_stripes(io);
        }
}

static void r5l_log_endio(struct bio *bio)
{
        struct r5l_io_unit *io = bio->bi_private;
        struct r5l_log *log = io->log;
        unsigned long flags;

        bio_put(bio);

        if (!atomic_dec_and_test(&io->pending_io))
                return;

        spin_lock_irqsave(&log->io_list_lock, flags);
        __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
        if (log->need_cache_flush)
                r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
                                      IO_UNIT_IO_END);
        else
                r5l_log_run_stripes(log);
        spin_unlock_irqrestore(&log->io_list_lock, flags);

        if (log->need_cache_flush)
                md_wakeup_thread(log->rdev->mddev->thread);
}

static void r5l_submit_current_io(struct r5l_log *log)
{
        struct r5l_io_unit *io = log->current_io;
        struct r5l_meta_block *block;
        struct bio *bio;
        unsigned long flags;
        u32 crc;

        if (!io)
                return;

        block = page_address(io->meta_page);
        block->meta_size = cpu_to_le32(io->meta_offset);
        crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
        block->checksum = cpu_to_le32(crc);

        log->current_io = NULL;
        spin_lock_irqsave(&log->io_list_lock, flags);
        __r5l_set_io_unit_state(io, IO_UNIT_IO_START);
        spin_unlock_irqrestore(&log->io_list_lock, flags);

        while ((bio = bio_list_pop(&io->bios)))
                submit_bio(WRITE, bio);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log, struct r5l_io_unit *io)
{
        struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);

        bio->bi_rw = WRITE;
        bio->bi_bdev = log->rdev->bdev;
        bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
        bio->bi_end_io = r5l_log_endio;
        bio->bi_private = io;

        bio_list_add(&io->bios, bio);
        atomic_inc(&io->pending_io);
        return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
        log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

        /*
         * If we filled up the log device, start from the beginning again,
         * which will require a new bio.
         *
         * Note: for this to work properly the log size needs to be a multiple
         * of BLOCK_SECTORS.
         */
        if (log->log_start == 0)
                io->current_bio = NULL;

        io->log_end = log->log_start;
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
        struct r5l_io_unit *io;
        struct r5l_meta_block *block;

        /* We can't handle memory allocation failures so far */
        io = kmem_cache_zalloc(log->io_kc, GFP_NOIO | __GFP_NOFAIL);
        io->log = log;
        bio_list_init(&io->bios);
        INIT_LIST_HEAD(&io->log_sibling);
        INIT_LIST_HEAD(&io->stripe_list);
        io->state = IO_UNIT_RUNNING;

        io->meta_page = alloc_page(GFP_NOIO | __GFP_NOFAIL | __GFP_ZERO);
        block = page_address(io->meta_page);
        block->magic = cpu_to_le32(R5LOG_MAGIC);
        block->version = R5LOG_VERSION;
        block->seq = cpu_to_le64(log->seq);
        block->position = cpu_to_le64(log->log_start);

        io->log_start = log->log_start;
        io->meta_offset = sizeof(struct r5l_meta_block);
        io->seq = log->seq;

        io->current_bio = r5l_bio_alloc(log, io);
        bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

        log->seq++;
        r5_reserve_log_entry(log, io);

        spin_lock_irq(&log->io_list_lock);
        list_add_tail(&io->log_sibling, &log->running_ios);
        spin_unlock_irq(&log->io_list_lock);

        return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
        if (log->current_io &&
            log->current_io->meta_offset + payload_size > PAGE_SIZE)
                r5l_submit_current_io(log);

        if (!log->current_io)
                log->current_io = r5l_new_meta(log);
        return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
                                    sector_t location,
                                    u32 checksum1, u32 checksum2,
                                    bool checksum2_valid)
{
        struct r5l_io_unit *io = log->current_io;
        struct r5l_payload_data_parity *payload;

        payload = page_address(io->meta_page) + io->meta_offset;
        payload->header.type = cpu_to_le16(type);
        payload->header.flags = cpu_to_le16(0);
        payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
                                    (PAGE_SHIFT - 9));
        payload->location = cpu_to_le64(location);
        payload->checksum[0] = cpu_to_le32(checksum1);
        if (checksum2_valid)
                payload->checksum[1] = cpu_to_le32(checksum2);

        io->meta_offset += sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
        struct r5l_io_unit *io = log->current_io;

alloc_bio:
        if (!io->current_bio)
                io->current_bio = r5l_bio_alloc(log, io);

        if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) {
                io->current_bio = NULL;
                goto alloc_bio;
        }

        r5_reserve_log_entry(log, io);
}

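/*
 * Illustrative sketch only (not called anywhere): the meta space one stripe
 * consumes, matching the open-coded computation in r5l_log_stripe() and
 * r5l_write_stripe() below. Each data page gets its own payload header plus
 * one checksum; the parity pages share a single payload header but carry one
 * checksum each.
 */
static inline int r5l_example_meta_size(int data_pages, int parity_pages)
{
        return (sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) *
                data_pages +
                sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * parity_pages;
}
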
static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
                           int data_pages, int parity_pages)
{
        int i;
        int meta_size;
        struct r5l_io_unit *io;

        meta_size =
                ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
                 * data_pages) +
                sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * parity_pages;

        r5l_get_meta(log, meta_size);
        io = log->current_io;

        for (i = 0; i < sh->disks; i++) {
                if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
                        continue;
                if (i == sh->pd_idx || i == sh->qd_idx)
                        continue;
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
                                        raid5_compute_blocknr(sh, i, 0),
                                        sh->dev[i].log_checksum, 0, false);
                r5l_append_payload_page(log, sh->dev[i].page);
        }

        if (sh->qd_idx >= 0) {
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
                                        sh->sector, sh->dev[sh->pd_idx].log_checksum,
                                        sh->dev[sh->qd_idx].log_checksum, true);
                r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
                r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
        } else {
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
                                        sh->sector, sh->dev[sh->pd_idx].log_checksum,
                                        0, false);
                r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
        }

        list_add_tail(&sh->log_list, &io->stripe_list);
        atomic_inc(&io->pending_stripe);
        sh->log_io = io;
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
/*
 * This runs in raid5d, where reclaim could wait for raid5d too (when it
 * flushes data from the log to the raid disks), so we shouldn't wait for
 * reclaim here.
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
        int write_disks = 0;
        int data_pages, parity_pages;
        int meta_size;
        int reserve;
        int i;

        if (!log)
                return -EAGAIN;
        /* Don't support stripe batch */
        if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
            test_bit(STRIPE_SYNCING, &sh->state)) {
                /* the stripe is written to log, we start writing it to raid */
                clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
                return -EAGAIN;
        }

        for (i = 0; i < sh->disks; i++) {
                void *addr;

                if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
                        continue;
                write_disks++;
                /* checksum is already calculated in last run */
                if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
                        continue;
                addr = kmap_atomic(sh->dev[i].page);
                sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
                                                    addr, PAGE_SIZE);
                kunmap_atomic(addr);
        }
        parity_pages = 1 + !!(sh->qd_idx >= 0);
        data_pages = write_disks - parity_pages;

        meta_size =
                ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
                 * data_pages) +
                sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * parity_pages;
        /* Doesn't work with very big raid arrays */
        if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
                return -EINVAL;

        set_bit(STRIPE_LOG_TRAPPED, &sh->state);
        /*
         * The stripe must enter the state machine again to finish the write,
         * so don't delay.
         */
        clear_bit(STRIPE_DELAYED, &sh->state);
        atomic_inc(&sh->count);

        mutex_lock(&log->io_mutex);
        /* meta + data */
        reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
        if (r5l_has_free_space(log, reserve))
                r5l_log_stripe(log, sh, data_pages, parity_pages);
        else {
                spin_lock(&log->no_space_stripes_lock);
                list_add_tail(&sh->log_list, &log->no_space_stripes);
                spin_unlock(&log->no_space_stripes_lock);

                r5l_wake_reclaim(log, reserve);
        }
        mutex_unlock(&log->io_mutex);

        return 0;
}

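/*
 * Illustrative caller sketch (an assumption about the raid5 core, not a copy
 * of it): a return of 0 from r5l_write_stripe() means the stripe is trapped
 * in the log and will be handed back to the state machine once its data is
 * safe; -EAGAIN means the caller should write the stripe through to the raid
 * disks directly.
 */
static inline bool r5l_example_try_log(struct r5l_log *log,
                                       struct stripe_head *sh)
{
        /* true: logged (or queued for log space); false: bypass the log */
        return r5l_write_stripe(log, sh) == 0;
}
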
void r5l_write_stripe_run(struct r5l_log *log)
{
        if (!log)
                return;
        mutex_lock(&log->io_mutex);
        r5l_submit_current_io(log);
        mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
        if (!log)
                return -ENODEV;
        /*
         * we flush the log disk cache first, then write stripe data to the
         * raid disks. So if a bio is finished, the log disk cache is already
         * flushed. Recovery guarantees we can recover the bio from the log
         * disk, so we don't need to flush again.
         */
        if (bio->bi_iter.bi_size == 0) {
                bio_endio(bio);
                return 0;
        }
        bio->bi_rw &= ~REQ_FLUSH;
        return -EAGAIN;
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
        struct stripe_head *sh;

        spin_lock(&log->no_space_stripes_lock);
        while (!list_empty(&log->no_space_stripes)) {
                sh = list_first_entry(&log->no_space_stripes,
                                      struct stripe_head, log_list);
                list_del_init(&sh->log_list);
                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
        spin_unlock(&log->no_space_stripes_lock);
}

static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
        return r5l_ring_distance(log, log->last_checkpoint,
                                 log->next_checkpoint);
}

static bool r5l_complete_finished_ios(struct r5l_log *log)
{
        struct r5l_io_unit *io, *next;
        bool found = false;

        assert_spin_locked(&log->io_list_lock);

        list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
                /* don't change list order */
                if (io->state < IO_UNIT_STRIPE_END)
                        break;

                log->next_checkpoint = io->log_start;
                log->next_cp_seq = io->seq;

                list_del(&io->log_sibling);
                r5l_free_io_unit(log, io);

                found = true;
        }

        return found;
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
        struct r5l_log *log = io->log;
        unsigned long flags;

        spin_lock_irqsave(&log->io_list_lock, flags);
        __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

        if (!r5l_complete_finished_ios(log)) {
                spin_unlock_irqrestore(&log->io_list_lock, flags);
                return;
        }

        if (r5l_reclaimable_space(log) > log->max_free_space)
                r5l_wake_reclaim(log, 0);

        spin_unlock_irqrestore(&log->io_list_lock, flags);
        wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
        struct r5l_io_unit *io;

        io = sh->log_io;
        sh->log_io = NULL;

        if (io && atomic_dec_and_test(&io->pending_stripe))
                __r5l_stripe_write_finished(io);
}

static void r5l_log_flush_endio(struct bio *bio)
{
        struct r5l_log *log = container_of(bio, struct r5l_log,
                                           flush_bio);
        unsigned long flags;
        struct r5l_io_unit *io;

        spin_lock_irqsave(&log->io_list_lock, flags);
        list_for_each_entry(io, &log->flushing_ios, log_sibling)
                r5l_io_run_stripes(io);
        list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
        spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Starting dispatch IO to raid.
 * A log consists of io_units, each led by a meta block. There is one
 * situation we want to avoid: a broken meta block in the middle of the log
 * means recovery can't find any meta blocks after it. So if an operation
 * requires that the meta block at the head of the log be persistent, we must
 * make sure the meta blocks before it are persistent too. A case is:
 *
 * stripe data/parity is in the log, and we start writing the stripe to the
 * raid disks. The stripe data/parity must be persistent in the log before we
 * do the write to the raid disks.
 *
 * The solution is that we strictly maintain the io_unit list order. In this
 * case, we only write the stripes of an io_unit to the raid disks once every
 * io_unit up to and including it has its data/parity persistent in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
        bool do_flush;

        if (!log || !log->need_cache_flush)
                return;

        spin_lock_irq(&log->io_list_lock);
        /* flush bio is running */
        if (!list_empty(&log->flushing_ios)) {
                spin_unlock_irq(&log->io_list_lock);
                return;
        }
        list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
        do_flush = !list_empty(&log->flushing_ios);
        spin_unlock_irq(&log->io_list_lock);

        if (!do_flush)
                return;
        bio_reset(&log->flush_bio);
        log->flush_bio.bi_bdev = log->rdev->bdev;
        log->flush_bio.bi_end_io = r5l_log_flush_endio;
        submit_bio(WRITE_FLUSH, &log->flush_bio);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_do_reclaim(struct r5l_log *log)
{
        sector_t reclaim_target = xchg(&log->reclaim_target, 0);
        sector_t reclaimable;
        sector_t next_checkpoint;
        u64 next_cp_seq;

        spin_lock_irq(&log->io_list_lock);
        /*
         * move the proper io_units to the reclaim list. We should not change
         * the order. reclaimable/unreclaimable io_units can be mixed in the
         * list, and we shouldn't reuse the space of an unreclaimable io_unit.
         */
        while (1) {
                reclaimable = r5l_reclaimable_space(log);
                if (reclaimable >= reclaim_target ||
                    (list_empty(&log->running_ios) &&
                     list_empty(&log->io_end_ios) &&
                     list_empty(&log->flushing_ios) &&
                     list_empty(&log->finished_ios)))
                        break;

                md_wakeup_thread(log->rdev->mddev->thread);
                wait_event_lock_irq(log->iounit_wait,
                                    r5l_reclaimable_space(log) > reclaimable,
                                    log->io_list_lock);
        }

        next_checkpoint = log->next_checkpoint;
        next_cp_seq = log->next_cp_seq;
        spin_unlock_irq(&log->io_list_lock);

        BUG_ON(reclaimable < 0);
        if (reclaimable == 0)
                return;

        /*
         * write_super will flush the cache of each raid disk. We must write
         * super here, because the log area might be reused soon and we don't
         * want to confuse recovery.
         */
        r5l_write_super(log, next_checkpoint);

        mutex_lock(&log->io_mutex);
        log->last_checkpoint = next_checkpoint;
        log->last_cp_seq = next_cp_seq;
        mutex_unlock(&log->io_mutex);

        r5l_run_no_space_stripes(log);
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
        struct mddev *mddev = thread->mddev;
        struct r5conf *conf = mddev->private;
        struct r5l_log *log = conf->log;

        if (!log)
                return;
        r5l_do_reclaim(log);
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
        unsigned long target;
        unsigned long new = (unsigned long)space; /* overflow in theory */

        do {
                target = log->reclaim_target;
                if (new < target)
                        return;
        } while (cmpxchg(&log->reclaim_target, target, new) != target);
        md_wakeup_thread(log->reclaim_thread);
}

void r5l_quiesce(struct r5l_log *log, int state)
{
        if (!log || state == 2)
                return;
        if (state == 0) {
                log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
                                        log->rdev->mddev, "reclaim");
        } else if (state == 1) {
                /*
                 * at this point all stripes are finished, so every io_unit is
                 * at least in the STRIPE_END state
                 */
                r5l_wake_reclaim(log, -1L);
                md_unregister_thread(&log->reclaim_thread);
                r5l_do_reclaim(log);
        }
}

struct r5l_recovery_ctx {
        struct page *meta_page;         /* current meta */
        sector_t meta_total_blocks;     /* total size of current meta and data */
        sector_t pos;                   /* recovery position */
        u64 seq;                        /* recovery position seq */
};

static int r5l_read_meta_block(struct r5l_log *log,
                               struct r5l_recovery_ctx *ctx)
{
        struct page *page = ctx->meta_page;
        struct r5l_meta_block *mb;
        u32 crc, stored_crc;

        if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
                return -EIO;

        mb = page_address(page);
        stored_crc = le32_to_cpu(mb->checksum);
        mb->checksum = 0;

        if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
            le64_to_cpu(mb->seq) != ctx->seq ||
            mb->version != R5LOG_VERSION ||
            le64_to_cpu(mb->position) != ctx->pos)
                return -EINVAL;

        crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        if (stored_crc != crc)
                return -EINVAL;

        if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
                return -EINVAL;

        ctx->meta_total_blocks = BLOCK_SECTORS;

        return 0;
}

static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
                                         struct r5l_recovery_ctx *ctx,
                                         sector_t stripe_sect,
                                         int *offset, sector_t *log_offset)
{
        struct r5conf *conf = log->rdev->mddev->private;
        struct stripe_head *sh;
        struct r5l_payload_data_parity *payload;
        int disk_index;

        sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
        while (1) {
                payload = page_address(ctx->meta_page) + *offset;

                if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
                        raid5_compute_sector(conf,
                                             le64_to_cpu(payload->location), 0,
                                             &disk_index, sh);

                        sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
                                     sh->dev[disk_index].page, READ, false);
                        sh->dev[disk_index].log_checksum =
                                le32_to_cpu(payload->checksum[0]);
                        set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
                        ctx->meta_total_blocks += BLOCK_SECTORS;
                } else {
                        disk_index = sh->pd_idx;
                        sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
                                     sh->dev[disk_index].page, READ, false);
                        sh->dev[disk_index].log_checksum =
                                le32_to_cpu(payload->checksum[0]);
                        set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);

                        if (sh->qd_idx >= 0) {
                                disk_index = sh->qd_idx;
                                sync_page_io(log->rdev,
                                             r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
                                             PAGE_SIZE, sh->dev[disk_index].page,
                                             READ, false);
                                sh->dev[disk_index].log_checksum =
                                        le32_to_cpu(payload->checksum[1]);
                                set_bit(R5_Wantwrite,
                                        &sh->dev[disk_index].flags);
                        }
                        ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
                }

                *log_offset = r5l_ring_add(log, *log_offset,
                                           le32_to_cpu(payload->size));
                *offset += sizeof(struct r5l_payload_data_parity) +
                        sizeof(__le32) *
                        (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
                if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
                        break;
        }

        for (disk_index = 0; disk_index < sh->disks; disk_index++) {
                void *addr;
                u32 checksum;

                if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
                        continue;
                addr = kmap_atomic(sh->dev[disk_index].page);
                checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
                kunmap_atomic(addr);
                if (checksum != sh->dev[disk_index].log_checksum)
                        goto error;
        }

        for (disk_index = 0; disk_index < sh->disks; disk_index++) {
                struct md_rdev *rdev, *rrdev;

                if (!test_and_clear_bit(R5_Wantwrite,
                                        &sh->dev[disk_index].flags))
                        continue;

                /* in case device is broken */
                rdev = rcu_dereference(conf->disks[disk_index].rdev);
                if (rdev)
                        sync_page_io(rdev, stripe_sect, PAGE_SIZE,
                                     sh->dev[disk_index].page, WRITE, false);
                rrdev = rcu_dereference(conf->disks[disk_index].replacement);
                if (rrdev)
                        sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
                                     sh->dev[disk_index].page, WRITE, false);
        }
        raid5_release_stripe(sh);
        return 0;

error:
        for (disk_index = 0; disk_index < sh->disks; disk_index++)
                sh->dev[disk_index].flags = 0;
        raid5_release_stripe(sh);
        return -EINVAL;
}

static int r5l_recovery_flush_one_meta(struct r5l_log *log,
                                       struct r5l_recovery_ctx *ctx)
{
        struct r5conf *conf = log->rdev->mddev->private;
        struct r5l_payload_data_parity *payload;
        struct r5l_meta_block *mb;
        int offset;
        sector_t log_offset;
        sector_t stripe_sector;

        mb = page_address(ctx->meta_page);
        offset = sizeof(struct r5l_meta_block);
        log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

        while (offset < le32_to_cpu(mb->meta_size)) {
                int dd;

                payload = (void *)mb + offset;
                stripe_sector = raid5_compute_sector(conf,
                                                     le64_to_cpu(payload->location), 0, &dd, NULL);
                if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
                                                  &offset, &log_offset))
                        return -EINVAL;
        }
        return 0;
}

/* copy data/parity from log to raid disks */
static void r5l_recovery_flush_log(struct r5l_log *log,
                                   struct r5l_recovery_ctx *ctx)
{
        while (1) {
                if (r5l_read_meta_block(log, ctx))
                        return;
                if (r5l_recovery_flush_one_meta(log, ctx))
                        return;
                ctx->seq++;
                ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
        }
}

static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
                                          u64 seq)
{
        struct page *page;
        struct r5l_meta_block *mb;
        u32 crc;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return -ENOMEM;
        mb = page_address(page);
        mb->magic = cpu_to_le32(R5LOG_MAGIC);
        mb->version = R5LOG_VERSION;
        mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
        mb->seq = cpu_to_le64(seq);
        mb->position = cpu_to_le64(pos);
        crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        mb->checksum = cpu_to_le32(crc);

        if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
                __free_page(page);
                return -EIO;
        }
        __free_page(page);
        return 0;
}

static int r5l_recovery_log(struct r5l_log *log)
{
        struct r5l_recovery_ctx ctx;

        ctx.pos = log->last_checkpoint;
        ctx.seq = log->last_cp_seq;
        ctx.meta_page = alloc_page(GFP_KERNEL);
        if (!ctx.meta_page)
                return -ENOMEM;

        r5l_recovery_flush_log(log, &ctx);
        __free_page(ctx.meta_page);

        /*
         * we did a recovery. Now ctx.pos points to an invalid meta block. New
         * log will start here. But we can't let the superblock point to the
         * last valid meta block. The log might look like:
         * | meta 1| meta 2| meta 3|
         * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
         * superblock points to meta 1 and we write a new valid meta 2n, then
         * if a crash happens again, the new recovery will start from meta 1.
         * Since meta 2n is valid now, recovery will think meta 3 is valid,
         * which is wrong.
         * The solution is that we create a new meta in the meta 2 position
         * with its seq == meta 1's seq + 10 and let the superblock point to
         * meta 2n. The same recovery will not think meta 3 is a valid meta,
         * because its seq doesn't match.
         */
        if (ctx.seq > log->last_cp_seq + 1) {
                int ret;

                ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
                if (ret)
                        return ret;
                log->seq = ctx.seq + 11;
                log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
                r5l_write_super(log, ctx.pos);
        } else {
                log->log_start = ctx.pos;
                log->seq = ctx.seq;
        }
        return 0;
}

static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
        struct mddev *mddev = log->rdev->mddev;

        log->rdev->journal_tail = cp;
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
}

static int r5l_load_log(struct r5l_log *log)
{
        struct md_rdev *rdev = log->rdev;
        struct page *page;
        struct r5l_meta_block *mb;
        sector_t cp = log->rdev->journal_tail;
        u32 stored_crc, expected_crc;
        bool create_super = false;
        int ret;

        /* Make sure it's valid */
        if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
                cp = 0;
        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
                ret = -EIO;
                goto ioerr;
        }
        mb = page_address(page);

        if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
            mb->version != R5LOG_VERSION) {
                create_super = true;
                goto create;
        }
        stored_crc = le32_to_cpu(mb->checksum);
        mb->checksum = 0;
        expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        if (stored_crc != expected_crc) {
                create_super = true;
                goto create;
        }
        if (le64_to_cpu(mb->position) != cp) {
                create_super = true;
                goto create;
        }
create:
        if (create_super) {
                log->last_cp_seq = prandom_u32();
                cp = 0;
                /*
                 * Make sure the super points to a correct address. The log
                 * might have data very soon. If the super doesn't have a
                 * correct log tail address, recovery can't find the log.
                 */
                r5l_write_super(log, cp);
        } else
                log->last_cp_seq = le64_to_cpu(mb->seq);

        log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
        log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
        if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
                log->max_free_space = RECLAIM_MAX_FREE_SPACE;
        log->last_checkpoint = cp;

        __free_page(page);

        return r5l_recovery_log(log);
ioerr:
        __free_page(page);
        return ret;
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
        struct r5l_log *log;

        if (PAGE_SIZE != 4096)
                return -EINVAL;
        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                return -ENOMEM;
        log->rdev = rdev;

        log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);

        log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
                                       sizeof(rdev->mddev->uuid));

        mutex_init(&log->io_mutex);

        spin_lock_init(&log->io_list_lock);
        INIT_LIST_HEAD(&log->running_ios);
        INIT_LIST_HEAD(&log->io_end_ios);
        INIT_LIST_HEAD(&log->flushing_ios);
        INIT_LIST_HEAD(&log->finished_ios);
        bio_init(&log->flush_bio);

        log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
        if (!log->io_kc)
                goto io_kc;

        log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
                                                 log->rdev->mddev, "reclaim");
        if (!log->reclaim_thread)
                goto reclaim_thread;
        init_waitqueue_head(&log->iounit_wait);

        INIT_LIST_HEAD(&log->no_space_stripes);
        spin_lock_init(&log->no_space_stripes_lock);

        if (r5l_load_log(log))
                goto error;

        conf->log = log;
        return 0;
error:
        md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
        kmem_cache_destroy(log->io_kc);
io_kc:
        kfree(log);
        return -EINVAL;
}

void r5l_exit_log(struct r5l_log *log)
{
        md_unregister_thread(&log->reclaim_thread);
        kmem_cache_destroy(log->io_kc);
        kfree(log);
}