/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

static void journal_read_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

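/*
 * Read the journal entries in one journal bucket of @ca into @list, keeping
 * the list sorted by sequence number and free of duplicates across buckets.
 * Returns 1 if any entries were added, 0 if the bucket held nothing new,
 * or a negative errno on allocation failure.
 */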
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= READ;
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bch_bio_map(bio, data);

		closure_bio_submit(bio, &cl, ca);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, ca->set);

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

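/*
 * Read every cache device's journal buckets: probe buckets in golden ratio
 * hash order until one with valid entries is found, binary search for the
 * most recently written bucket, then walk backwards until we stop finding
 * entries. The result is a sorted list of journal_replay entries for
 * bch_journal_mark()/bch_journal_replay().
 */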
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
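		/*
		 * (2654435769 is 2^32 divided by the golden ratio - Knuth's
		 * multiplicative hash constant - so the probe order is spread
		 * evenly across the journal buckets.)
		 */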
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
			if (read_bucket(l))
				goto bsearch;

		if (list_empty(list))
			continue;
bsearch:
		/* Binary search */
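		/*
		 * Invariant: bucket l is known to contain journal entries;
		 * r is the next bucket we have already read. Each iteration
		 * reads the midpoint m - if the newest sequence number on
		 * the list changed, m held newer entries than anything seen
		 * so far, so the most recently written bucket is at or
		 * above m.
		 */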
		m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				ja->cur_idx = ja->discard_idx =
					ja->last_idx = i;
			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

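/*
 * Mark the keys from every journal entry on @list - bumping bucket pin
 * counts and priorities, as garbage collection would - so that allocation
 * can't reuse buckets the journalled keys still point to, and set up a
 * journal pin for each entry ahead of replay.
 */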
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			unsigned j;

			for (j = 0; j < KEY_PTRS(k); j++) {
				struct bucket *g = PTR_BUCKET(c, k, j);
				atomic_inc(&g->pin);

				if (g->prio == BTREE_PRIO &&
				    !ptr_stale(c, k, j))
					g->prio = INITIAL_PRIO;
			}

			__bch_btree_mark_key(c, 0, k);
		}
	}
}

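/*
 * Reinsert the journalled keys into the btree, in exactly the order they
 * appear on @list, dropping each entry's journal pin as it completes.
 * Frees the list before returning.
 */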
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	bch_keylist_init(&keylist);

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < end(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bkey_copy(keylist.top, k);
			bch_keylist_push(&keylist);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

/* Journalling */

static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry; best is our current candidate, and is locked if non-NULL:
	 */
	struct btree *b, *best;
	unsigned i;
retry:
	best = NULL;

	for_each_cached_btree(b, c, i)
		if (btree_current_write(b)->journal) {
			if (!best)
				best = b;
			else if (journal_pin_cmp(c,
					btree_current_write(best)->journal,
					btree_current_write(b)->journal)) {
				best = b;
			}
		}

	b = best;
	if (b) {
		rw_lock(true, b, b->level);

		if (!btree_current_write(b)->journal) {
			rw_unlock(true, b);
			/* We raced */
			goto retry;
		}

		bch_btree_node_write(b, NULL);
		rw_unlock(true, b);
	}
}

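/*
 * journal.pin holds one refcount per open journal entry, with the newest
 * at the back of the fifo; the oldest entry that is still pinned is
 * therefore seq - fifo_used() + 1.
 */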
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio, int error)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(0, &ja->discard_bio);
}

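/*
 * Discard journal buckets we've finished with, one at a time: issue a
 * discard for the bucket at discard_idx, advance discard_idx once it
 * completes, and stop when discard_idx catches up with last_idx. Progress
 * is tracked via the DISCARD_READY -> DISCARD_IN_FLIGHT -> DISCARD_DONE
 * cycle in discard_in_flight.
 */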
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio->bi_bdev		= ca->bdev;
		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
		bio->bi_max_vecs	= 1;
		bio->bi_io_vec		= bio->bi_inline_vecs;
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}

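/*
 * Free up journal space: pop pins whose refcount has hit zero, advance each
 * device's last_idx past fully flushed buckets (discarding them if enabled),
 * and if the current bucket is exhausted allocate the next one.
 */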
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p;

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

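/*
 * Open a new journal entry: flip to the other journal_write buffer, push a
 * fresh pin (refcount 1) and bump the sequence number.
 */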
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio, int error)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(error, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}

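/*
 * Write out the current journal entry; called with journal.lock held, which
 * is released before returning. Fills in the btree root, uuid and prio
 * bucket pointers, checksums the set, then submits one bio per cache device.
 */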
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		/*
		 * XXX: have to unlock closure before we unlock journal lock,
		 * else we race with bch_journal(). But this way we race
		 * against cache set unregister. Doh.
		 */
		set_closure_fn(cl, NULL, NULL);
		closure_sub(cl, CLOSURE_RUNNING + 1);
		spin_unlock(&c->journal.lock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
	}

	c->journal.blocks_free -= set_blocks(w->data, c);

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio->bi_bdev	= ca->bdev;
		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
		bio->bi_iter.bi_size = sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(bio, cl, c->cache[0]);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (closure_trylock(cl, &c->cl))
		journal_write_unlocked(cl);
	else
		spin_unlock(&c->journal.lock);
}

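/*
 * Block until the current journal entry has room for another @nkeys u64s of
 * key data, flushing the journal (and, when it's completely full, the btree
 * nodes pinning old entries) as necessary. Returns with journal.lock held.
 */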
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned nkeys)
{
	size_t sectors;
	struct closure cl;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       c) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		/* XXX: tracepoint */
		if (!journal_full(&c->journal)) {
			trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			closure_wait(&w->wait, &cl);
			journal_try_write(c); /* unlocks */
		} else {
			trace_bcache_journal_full(c);

			closure_wait(&c->journal.wait, &cl);
			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
	}
}

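/*
 * Delayed-work callback: writes out the current journal entry
 * journal_delay_ms after the first key landed in it without an explicit
 * flush request (see bch_journal() below).
 */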
static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	journal_try_write(c);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then bch_journal()
 * hands those same keys off to btree_insert_async()
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->need_write) {
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

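/*
 * Journal a metadata-only update: write an empty journal entry so the btree
 * root and the other metadata carried in the jset header reach disk, with
 * @cl (if non-NULL) waiting on the write.
 */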
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	closure_init_unlocked(&j->io);
	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}