/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include "md.h"
#include "raid5.h"

/*
 * metadata/data are stored on disk in 4k-sized units (blocks) regardless of
 * the underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)

/*
 * reclaim runs once reclaimable space reaches 1/4 of the disk size or 10GiB,
 * whichever is smaller. This prevents recovery from having to scan a very
 * long log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sectors */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, rounded to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim runs if free space reaches
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet
					 * written to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head flushed_ios;	/* io_units which have settled down in
					 * the log disk */
	struct bio flush_bio;
	struct list_head stripe_end_ios;/* io_units which have been completely
					 * written to the RAID but have not yet
					 * been considered for updating super */

	struct kmem_cache *io_kc;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed. if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (i.e.,
					 * reclaim doesn't wait for a specific
					 * io_unit to switch to the
					 * IO_UNIT_STRIPE_END state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;
};

/*
 * An IO range starts at a meta data block and ends at the next meta data
 * block. The io_unit's meta data block tracks the data/parity that follows
 * it. An io_unit is written to the log disk with normal writes; as we always
 * flush the log disk first and only then start moving data to the raid
 * disks, there is no need to write the io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio_list bios;
	atomic_t pending_io;	/* pending bios not written to log yet */
	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the meta block */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling;	/* log->running_ios */
	struct list_head stripe_list;	/* stripes added to the io_unit */

	int state;
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bios have started writing to the
				 * log; no new bios are accepted */
	IO_UNIT_IO_END = 2,	/* io_unit bios have finished writing to the log */
	IO_UNIT_STRIPE_END = 3,	/* stripe data has finished writing to the raid */
};

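/*
 * The log device is used as a circular buffer of BLOCK_SECTORS-sized
 * blocks; positions wrap around at device_size. For example, with
 * device_size == 1024 sectors, r5l_ring_add(log, 1020, 8) yields 4.
 */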
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

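	/*
	 * Require strictly more room than requested: the log must never
	 * become completely full, or log_start would catch up with
	 * last_checkpoint and be indistinguishable from an empty log.
	 */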
	return log->device_size > used_size + size;
}

static struct r5l_io_unit *r5l_alloc_io_unit(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	/* We can't handle memory allocation failure so far */
	gfp_t gfp = GFP_NOIO | __GFP_NOFAIL;

	io = kmem_cache_zalloc(log->io_kc, gfp);
	io->log = log;
	io->meta_page = alloc_page(gfp | __GFP_ZERO);

	bio_list_init(&io->bios);
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	io->state = IO_UNIT_RUNNING;
	return io;
}

static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
{
	__free_page(io->meta_page);
	kmem_cache_free(log->io_kc, io);
}

static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
				  enum r5l_io_unit_state state)
{
	struct r5l_io_unit *io;

	while (!list_empty(from)) {
		io = list_first_entry(from, struct r5l_io_unit, log_sibling);
		/* don't change list order */
		if (io->state >= state)
			list_move_tail(&io->log_sibling, to);
		else
			break;
	}
}

/*
 * We don't want too many io_units to reside on the stripe_end_ios list;
 * they would waste a lot of memory, so we try to remove some. But we must
 * keep at least two io_units: the superblock must point to a valid meta
 * block, and if that is the last meta block, recovery can scan less.
 */
static void r5l_compress_stripe_end_list(struct r5l_log *log)
{
	struct r5l_io_unit *first, *last, *io;

	first = list_first_entry(&log->stripe_end_ios,
				 struct r5l_io_unit, log_sibling);
	last = list_last_entry(&log->stripe_end_ios,
			       struct r5l_io_unit, log_sibling);
	if (first == last)
		return;
	list_del(&first->log_sibling);
	list_del(&last->log_sibling);
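	/*
	 * Merge every remaining io_unit into the first one by extending its
	 * range, then free the merged io_units.
	 */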
	while (!list_empty(&log->stripe_end_ios)) {
		io = list_first_entry(&log->stripe_end_ios,
				      struct r5l_io_unit, log_sibling);
		list_del(&io->log_sibling);
		first->log_end = io->log_end;
		r5l_free_io_unit(log, io);
	}
	list_add_tail(&first->log_sibling, &log->stripe_end_ios);
	list_add_tail(&last->log_sibling, &log->stripe_end_ios);
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

/* XXX: totally ignores I/O errors */
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_log *log = io->log;
	unsigned long flags;

	bio_put(bio);

	if (!atomic_dec_and_test(&io->pending_io))
		return;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
	r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
			      IO_UNIT_IO_END);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	md_wakeup_thread(log->rdev->mddev->thread);
}

static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	struct bio *bio;
	unsigned long flags;
	u32 crc;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	while ((bio = bio_list_pop(&io->bios))) {
		/* all IO must start from rdev->data_offset */
		bio->bi_iter.bi_sector += log->rdev->data_offset;
		submit_bio(WRITE, bio);
	}
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;
	struct bio *bio;

	io = r5l_alloc_io_unit(log);

	block = page_address(io->meta_page);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq;

	bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
	io->current_bio = bio;
	bio->bi_rw = WRITE;
	bio->bi_bdev = log->rdev->bdev;
	bio->bi_iter.bi_sector = log->log_start;
	bio_add_page(bio, io->meta_page, PAGE_SIZE, 0);
	bio->bi_end_io = r5l_log_endio;
	bio->bi_private = io;

	bio_list_add(&io->bios, bio);
	atomic_inc(&io->pending_io);

	log->seq++;
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
	io->log_end = log->log_start;
	/* current bio hit disk end */
	if (log->log_start == 0)
		io->current_bio = NULL;

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

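/*
 * A meta block occupies one 4k block: a struct r5l_meta_block header
 * followed by packed struct r5l_payload_data_parity entries, one per data
 * page and one per parity set. The pages they describe are appended in the
 * blocks that follow, up to the next meta block.
 */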
static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	struct r5l_io_unit *io;

	io = log->current_io;
	if (io && io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);
	io = log->current_io;
	if (io)
		return 0;

	log->current_io = r5l_new_meta(log);
	return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

alloc_bio:
	if (!io->current_bio) {
		struct bio *bio;

		bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
		bio->bi_rw = WRITE;
		bio->bi_bdev = log->rdev->bdev;
		bio->bi_iter.bi_sector = log->log_start;
		bio->bi_end_io = r5l_log_endio;
		bio->bi_private = io;
		bio_list_add(&io->bios, bio);
		atomic_inc(&io->pending_io);
		io->current_bio = bio;
	}
	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) {
		io->current_bio = NULL;
		goto alloc_bio;
	}
	log->log_start = r5l_ring_add(log, log->log_start,
				      BLOCK_SECTORS);
	/* current bio hit disk end */
	if (log->log_start == 0)
		io->current_bio = NULL;

	io->log_end = log->log_start;
}

static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			   int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	struct r5l_io_unit *io;

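	/*
	 * Each data page needs its own payload header plus one checksum;
	 * the parity set shares a single payload header with one checksum
	 * per parity page.
	 */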
	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	r5l_get_meta(log, meta_size);
	io = log->current_io;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (sh->qd_idx >= 0) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	}

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	int write_disks = 0;
	int data_pages, parity_pages;
	int meta_size;
	int reserve;
	int i;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe has been written to the log; start writing it to the raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		write_disks++;
		/* checksum was already calculated in the last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
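	/* P, plus Q when this is a raid6 array (qd_idx >= 0) */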
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;
	/* Doesn't work with very big raid arrays */
	if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
		return -EINVAL;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
	if (r5l_has_free_space(log, reserve))
		r5l_log_stripe(log, sh, data_pages, parity_pages);
	else {
		spin_lock(&log->no_space_stripes_lock);
		list_add_tail(&sh->log_list, &log->no_space_stripes);
		spin_unlock(&log->no_space_stripes_lock);

		r5l_wake_reclaim(log, reserve);
	}
	mutex_unlock(&log->io_mutex);

	return 0;
}

void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	struct r5l_io_unit *last;
	sector_t reclaimable_space;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
	r5l_move_io_unit_list(&log->flushed_ios, &log->stripe_end_ios,
			      IO_UNIT_STRIPE_END);

	last = list_last_entry(&log->stripe_end_ios,
			       struct r5l_io_unit, log_sibling);
	reclaimable_space = r5l_ring_distance(log, log->last_checkpoint,
					      last->log_end);
	if (reclaimable_space >= log->max_free_space)
		r5l_wake_reclaim(log, 0);

	r5l_compress_stripe_end_list(log);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}

static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
					   flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;
	struct stripe_head *sh;

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling) {
		while (!list_empty(&io->stripe_list)) {
			sh = list_first_entry(&io->stripe_list,
					      struct stripe_head, log_list);
			list_del_init(&sh->log_list);
			set_bit(STRIPE_HANDLE, &sh->state);
			raid5_release_stripe(sh);
		}
	}
	list_splice_tail_init(&log->flushing_ios, &log->flushed_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Start dispatching IO to the raid. The log consists of io_units, each led
 * by a meta block. There is one situation we want to avoid: a broken meta
 * block in the middle of the log keeps recovery from finding the meta
 * blocks at the head of the log. So if an operation requires a meta block
 * at the head to be persistent in the log, we must make sure the meta
 * blocks before it are persistent too. A case is:
 *
 * stripe data/parity is in the log and we start writing the stripe to the
 * raid disks. The stripe's data/parity must be persistent in the log before
 * we do the write to the raid disks.
 *
 * The solution is to strictly maintain io_unit list order. We only write
 * stripes of an io_unit to the raid disks once that io_unit is the first
 * one whose data/parity is in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;
	if (!log)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_reset(&log->flush_bio);
	log->flush_bio.bi_bdev = log->rdev->bdev;
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	submit_bio(WRITE_FLUSH, &log->flush_bio);
}

static void r5l_kick_io_unit(struct r5l_log *log)
{
	md_wakeup_thread(log->rdev->mddev->thread);
	wait_event_lock_irq(log->iounit_wait, !list_empty(&log->stripe_end_ios),
			    log->io_list_lock);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);
static void r5l_do_reclaim(struct r5l_log *log)
{
	struct r5l_io_unit *io, *last;
	LIST_HEAD(list);
	sector_t free = 0;
	sector_t reclaim_target = xchg(&log->reclaim_target, 0);

	spin_lock_irq(&log->io_list_lock);
	/*
	 * move the proper io_units to the reclaim list. We must not change
	 * the order: reclaimable and unreclaimable io_units can be mixed in
	 * the list, and we mustn't reuse the space of an unreclaimable
	 * io_unit.
	 */
	while (1) {
		struct list_head *target_list = NULL;

		while (!list_empty(&log->stripe_end_ios)) {
			io = list_first_entry(&log->stripe_end_ios,
					      struct r5l_io_unit, log_sibling);
			list_move_tail(&io->log_sibling, &list);
			free += r5l_ring_distance(log, io->log_start,
						  io->log_end);
		}

		if (free >= reclaim_target ||
		    (list_empty(&log->running_ios) &&
		     list_empty(&log->io_end_ios) &&
		     list_empty(&log->flushing_ios) &&
		     list_empty(&log->flushed_ios)))
			break;

		/* The waiting below mostly happens when we shut down the raid */
		if (!list_empty(&log->flushed_ios))
			target_list = &log->flushed_ios;
		else if (!list_empty(&log->flushing_ios))
			target_list = &log->flushing_ios;
		else if (!list_empty(&log->io_end_ios))
			target_list = &log->io_end_ios;
		else if (!list_empty(&log->running_ios))
			target_list = &log->running_ios;

		r5l_kick_io_unit(log);
	}
	spin_unlock_irq(&log->io_list_lock);

	if (list_empty(&list))
		return;

	/* the super always points to the last valid meta block */
	last = list_last_entry(&list, struct r5l_io_unit, log_sibling);
	/*
	 * write_super will flush the cache of each raid disk. We must write
	 * the super here, because the log area might be reused soon and we
	 * don't want to confuse recovery.
	 */
	r5l_write_super(log, last->log_start);

	mutex_lock(&log->io_mutex);
	log->last_checkpoint = last->log_start;
	log->last_cp_seq = last->seq;
	mutex_unlock(&log->io_mutex);
	r5l_run_no_space_stripes(log);

	while (!list_empty(&list)) {
		io = list_first_entry(&list, struct r5l_io_unit, log_sibling);
		list_del(&io->log_sibling);
		r5l_free_io_unit(log, io);
	}
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;
	r5l_do_reclaim(log);
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
	unsigned long target;
	unsigned long new = (unsigned long)space; /* overflow in theory */

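	/*
	 * Only ever raise the target; a concurrent r5l_do_reclaim() may
	 * grab and clear it with xchg() at any time.
	 */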
	do {
		target = log->reclaim_target;
		if (new < target)
			return;
	} while (cmpxchg(&log->reclaim_target, target, new) != target);
	md_wakeup_thread(log->reclaim_thread);
}

struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
};

static int r5l_read_meta_block(struct r5l_log *log,
			       struct r5l_recovery_ctx *ctx)
{
	struct page *page = ctx->meta_page;
	struct r5l_meta_block *mb;
	u32 crc, stored_crc;

	if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
		return -EIO;

	mb = page_address(page);
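	/* the stored CRC was computed with the checksum field zeroed */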
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    le64_to_cpu(mb->seq) != ctx->seq ||
	    mb->version != R5LOG_VERSION ||
	    le64_to_cpu(mb->position) != ctx->pos)
		return -EINVAL;

	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != crc)
		return -EINVAL;

	if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
		return -EINVAL;

	ctx->meta_total_blocks = BLOCK_SECTORS;

	return 0;
}

static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx,
					 sector_t stripe_sect,
					 int *offset, sector_t *log_offset)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct stripe_head *sh;
	struct r5l_payload_data_parity *payload;
	int disk_index;

	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
	while (1) {
		payload = page_address(ctx->meta_page) + *offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			raid5_compute_sector(conf,
					     le64_to_cpu(payload->location), 0,
					     &disk_index, sh);

			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
			ctx->meta_total_blocks += BLOCK_SECTORS;
		} else {
			disk_index = sh->pd_idx;
			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);

			if (sh->qd_idx >= 0) {
				disk_index = sh->qd_idx;
				sync_page_io(log->rdev,
					     r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
					     PAGE_SIZE, sh->dev[disk_index].page,
					     READ, false);
				sh->dev[disk_index].log_checksum =
					le32_to_cpu(payload->checksum[1]);
				set_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags);
			}
			ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
		}

		*log_offset = r5l_ring_add(log, *log_offset,
					   le32_to_cpu(payload->size));
		*offset += sizeof(struct r5l_payload_data_parity) +
			sizeof(__le32) *
			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
			break;
	}

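	/*
	 * Verify the checksum of every page read back from the log before
	 * any of them is written to the raid disks.
	 */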
	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		void *addr;
		u32 checksum;

		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;
		addr = kmap_atomic(sh->dev[disk_index].page);
		checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
		kunmap_atomic(addr);
		if (checksum != sh->dev[disk_index].log_checksum)
			goto error;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		struct md_rdev *rdev, *rrdev;

		if (!test_and_clear_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags))
			continue;

		/* in case device is broken */
		rdev = rcu_dereference(conf->disks[disk_index].rdev);
		if (rdev)
			sync_page_io(rdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
		if (rrdev)
			sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
	}
	raid5_release_stripe(sh);
	return 0;

error:
	for (disk_index = 0; disk_index < sh->disks; disk_index++)
		sh->dev[disk_index].flags = 0;
	raid5_release_stripe(sh);
	return -EINVAL;
}

static int r5l_recovery_flush_one_meta(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct r5l_payload_data_parity *payload;
	struct r5l_meta_block *mb;
	int offset;
	sector_t log_offset;
	sector_t stripe_sector;

	mb = page_address(ctx->meta_page);
	offset = sizeof(struct r5l_meta_block);
	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

	while (offset < le32_to_cpu(mb->meta_size)) {
		int dd;

		payload = (void *)mb + offset;
		stripe_sector = raid5_compute_sector(conf,
				le64_to_cpu(payload->location), 0, &dd, NULL);
		if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
						  &offset, &log_offset))
			return -EINVAL;
	}
	return 0;
}

/* copy data/parity from log to raid disks */
static void r5l_recovery_flush_log(struct r5l_log *log,
				   struct r5l_recovery_ctx *ctx)
{
	while (1) {
		if (r5l_read_meta_block(log, ctx))
			return;
		if (r5l_recovery_flush_one_meta(log, ctx))
			return;
		ctx->seq++;
		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
	}
}

static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
					  u64 seq)
{
	struct page *page;
	struct r5l_meta_block *mb;
	u32 crc;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	mb = page_address(page);
	mb->magic = cpu_to_le32(R5LOG_MAGIC);
	mb->version = R5LOG_VERSION;
	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
	mb->seq = cpu_to_le64(seq);
	mb->position = cpu_to_le64(pos);
	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	mb->checksum = cpu_to_le32(crc);

	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
		__free_page(page);
		return -EIO;
	}
	__free_page(page);
	return 0;
}

static int r5l_recovery_log(struct r5l_log *log)
{
	struct r5l_recovery_ctx ctx;

	ctx.pos = log->last_checkpoint;
	ctx.seq = log->last_cp_seq;
	ctx.meta_page = alloc_page(GFP_KERNEL);
	if (!ctx.meta_page)
		return -ENOMEM;

	r5l_recovery_flush_log(log, &ctx);
	__free_page(ctx.meta_page);

	/*
	 * We did a recovery. Now ctx.pos points to an invalid meta block,
	 * and the new log will start here. But we can't simply let the
	 * superblock point to the last valid meta block. The log might look
	 * like:
	 * | meta 1 | meta 2 | meta 3 |
	 * meta 1 is valid, meta 2 is invalid, and meta 3 could still be
	 * valid. If the superblock points to meta 1 and we write a new
	 * valid meta 2n there, then if a crash happens again, the new
	 * recovery starts from meta 1; since meta 2n is valid now, recovery
	 * would think meta 3 is valid too, which is wrong.
	 * The solution is to create a new meta block in meta 2's place with
	 * seq == meta 1's seq + 10 and let the superblock point to it. The
	 * same recovery will then not take meta 3 for a valid meta block,
	 * because its seq doesn't match.
	 */
	if (ctx.seq > log->last_cp_seq + 1) {
		int ret;

		ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
		if (ret)
			return ret;
		log->seq = ctx.seq + 11;
		log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
		r5l_write_super(log, ctx.pos);
	} else {
		log->log_start = ctx.pos;
		log->seq = ctx.seq;
	}
	return 0;
}

static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
}

static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = prandom_u32();
		cp = 0;
		/*
		 * Make sure the super points to the correct address. The log
		 * might get data very soon; if the super doesn't have the
		 * correct log tail address, recovery can't find the log.
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	return r5l_recovery_log(log);
ioerr:
	__free_page(page);
	return ret;
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct r5l_log *log;

	if (PAGE_SIZE != 4096)
		return -EINVAL;
	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;

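	/*
	 * Checksum of the array uuid; used as the CRC seed for all on-disk
	 * blocks so that blocks from a different array fail validation.
	 */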
	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->stripe_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->flushed_ios);
	bio_init(&log->flush_bio);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	if (r5l_load_log(log))
		goto error;

	conf->log = log;
	return 0;
error:
	md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}

void r5l_exit_log(struct r5l_log *log)
{
	/*
	 * at this point all stripes are finished, so every io_unit is at
	 * least in STRIPE_END state
	 */
	r5l_wake_reclaim(log, -1L);
	md_unregister_thread(&log->reclaim_thread);
	r5l_do_reclaim(log);
	/*
	 * force a super update, as r5l_do_reclaim might have updated the
	 * super. mddev->thread is already stopped.
	 */
	md_update_sb(log->rdev->mddev, 1);

	kmem_cache_destroy(log->io_kc);
	kfree(log);
}