/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
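
/*
 * Concretely, the recovery sequence driven by run_cache_set() (a sketch
 * based on the description above; run_cache_set() itself lives in super.c
 * and does more work in between these steps):
 *
 *      bch_journal_read(c, &journal);          read entries from all caches
 *      bch_journal_mark(c, &journal);          mark keys, as GC would
 *      bch_journal_replay(c, &journal);        reinsert in journal order
 */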

static void journal_read_endio(struct bio *bio, int error)
{
        struct closure *cl = bio->bi_private;
        closure_put(cl);
}
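
/*
 * (Reads here are synchronous: journal_read_bucket() below submits the
 * bio against an on-stack closure and then closure_sync()s on it, so this
 * completion only needs to drop the bio's ref with closure_put().)
 */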

static int journal_read_bucket(struct cache *ca, struct list_head *list,
                               unsigned bucket_index)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->bio;

        struct journal_replay *i;
        struct jset *j, *data = ca->set->journal.w[0].data;
        struct closure cl;
        unsigned len, left, offset = 0;
        int ret = 0;
        sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

        closure_init_stack(&cl);

        pr_debug("reading %llu", (uint64_t) bucket);

        while (offset < ca->sb.bucket_size) {
reread:         left = ca->sb.bucket_size - offset;
                len = min_t(unsigned, left, PAGE_SECTORS * 8);

                bio_reset(bio);
                bio->bi_sector  = bucket + offset;
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = READ;
                bio->bi_size    = len << 9;

                bio->bi_end_io  = journal_read_endio;
                bio->bi_private = &cl;
                bch_bio_map(bio, data);

                closure_bio_submit(bio, &cl, ca);
                closure_sync(&cl);

                /* This function could be simpler now since we no longer write
                 * journal entries that overlap bucket boundaries; this means
                 * the start of a bucket will always have a valid journal entry
                 * if it has any journal entries at all.
                 */

                j = data;
                while (len) {
                        struct list_head *where;
                        size_t blocks, bytes = set_bytes(j);

                        if (j->magic != jset_magic(ca->set))
                                return ret;

                        if (bytes > left << 9)
                                return ret;

                        if (bytes > len << 9)
                                goto reread;

                        if (j->csum != csum_set(j))
                                return ret;

                        blocks = set_blocks(j, ca->set);

                        while (!list_empty(list)) {
                                i = list_first_entry(list,
                                        struct journal_replay, list);
                                if (i->j.seq >= j->last_seq)
                                        break;
                                list_del(&i->list);
                                kfree(i);
                        }

                        list_for_each_entry_reverse(i, list, list) {
                                if (j->seq == i->j.seq)
                                        goto next_set;

                                if (j->seq < i->j.last_seq)
                                        goto next_set;

                                if (j->seq > i->j.seq) {
                                        where = &i->list;
                                        goto add;
                                }
                        }

                        where = list;
add:
                        i = kmalloc(offsetof(struct journal_replay, j) +
                                    bytes, GFP_KERNEL);
                        if (!i)
                                return -ENOMEM;
                        memcpy(&i->j, j, bytes);
                        list_add(&i->list, where);
                        ret = 1;

                        ja->seq[bucket_index] = j->seq;
next_set:
                        offset  += blocks * ca->sb.block_size;
                        len     -= blocks * ca->sb.block_size;
                        j = ((void *) j) + blocks * block_bytes(ca);
                }
        }

        return ret;
}
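
/*
 * (Return convention, relied on by read_bucket() below: 0 means the
 * bucket held no new journal entries, 1 means at least one entry was
 * added to the list, negative is an error - here only -ENOMEM.)
 */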

int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)                                                  \
        ({                                                              \
                int ret = journal_read_bucket(ca, list, b);             \
                __set_bit(b, bitmap);                                   \
                if (ret < 0)                                            \
                        return ret;                                     \
                ret;                                                    \
        })
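
        /*
         * (read_bucket() is a statement expression: it reads journal
         * bucket b, marks it as checked in the on-stack bitmap, returns
         * from bch_journal_read() on error, and otherwise evaluates to
         * whether the bucket contained any journal entries.)
         */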

        struct cache *ca;
        unsigned iter;

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
                unsigned i, l, r, m;
                uint64_t seq;

                bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
                pr_debug("%u journal buckets", ca->sb.njournal_buckets);

                /*
                 * Read journal buckets ordered by golden ratio hash to quickly
                 * find a sequence of buckets with valid journal entries
                 */
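                /*
                 * (2654435769 is 2^32 divided by the golden ratio, i.e.
                 * Fibonacci hashing: successive values of i land spread
                 * evenly across the bucket range instead of scanning it
                 * in order.)
                 */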
                for (i = 0; i < ca->sb.njournal_buckets; i++) {
                        l = (i * 2654435769U) % ca->sb.njournal_buckets;

                        if (test_bit(l, bitmap))
                                break;

                        if (read_bucket(l))
                                goto bsearch;
                }

                /*
                 * If that fails, check all the buckets we haven't checked
                 * already
                 */
                pr_debug("falling back to linear search");

                for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
                     l < ca->sb.njournal_buckets;
                     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
                        if (read_bucket(l))
                                goto bsearch;

                if (list_empty(list))
                        continue;
bsearch:
                /* Binary search */
                m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
                pr_debug("starting binary search, l %u r %u", l, r);

                while (l + 1 < r) {
                        seq = list_entry(list->prev, struct journal_replay,
                                         list)->j.seq;

                        m = (l + r) >> 1;
                        read_bucket(m);

                        if (seq != list_entry(list->prev, struct journal_replay,
                                              list)->j.seq)
                                l = m;
                        else
                                r = m;
                }
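
                /*
                 * (Loop invariant, roughly: bucket l is known to contain
                 * journal entries and bucket r is known not to add newer
                 * ones; a probe of midpoint m "found entries" iff it
                 * changed the newest seq at the tail of the list.)
                 */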

                /*
                 * Read buckets in reverse order until we stop finding more
                 * journal entries
                 */
                pr_debug("finishing up: m %u njournal_buckets %u",
                         m, ca->sb.njournal_buckets);
                l = m;

                while (1) {
                        if (!l--)
                                l = ca->sb.njournal_buckets - 1;

                        if (l == m)
                                break;

                        if (test_bit(l, bitmap))
                                continue;

                        if (!read_bucket(l))
                                break;
                }

                seq = 0;

                for (i = 0; i < ca->sb.njournal_buckets; i++)
                        if (ja->seq[i] > seq) {
                                seq = ja->seq[i];
                                ja->cur_idx = ja->discard_idx =
                                        ja->last_idx = i;
                        }
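
                /*
                 * (cur_idx, last_idx and discard_idx all start at the
                 * bucket with the newest sequence number seen - presumably
                 * so new journal writes continue right after the most
                 * recent entry on each device.)
                 */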
        }

        if (!list_empty(list))
                c->journal.seq = list_entry(list->prev,
                                            struct journal_replay,
                                            list)->j.seq;

        return 0;
#undef read_bucket
}

void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
        atomic_t p = { 0 };
        struct bkey *k;
        struct journal_replay *i;
        struct journal *j = &c->journal;
        uint64_t last = j->seq;

        /*
         * journal.pin should never fill up - we never write a journal
         * entry when it would fill up. But if for some reason it does, we
         * iterate over the list in reverse order so that we can just skip that
         * refcount instead of bugging.
         */

        list_for_each_entry_reverse(i, list, list) {
                BUG_ON(last < i->j.seq);
                i->pin = NULL;

                while (last-- != i->j.seq)
                        if (fifo_free(&j->pin) > 1) {
                                fifo_push_front(&j->pin, p);
                                atomic_set(&fifo_front(&j->pin), 0);
                        }

                if (fifo_free(&j->pin) > 1) {
                        fifo_push_front(&j->pin, p);
                        i->pin = &fifo_front(&j->pin);
                        atomic_set(i->pin, 1);
                }
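
                /*
                 * (An entry that gets a pin starts with a refcount of 1;
                 * bch_journal_replay() drops it once the entry's keys have
                 * been reinserted, letting journal_reclaim() free the
                 * bucket.)
                 */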

                for (k = i->j.start;
                     k < end(&i->j);
                     k = bkey_next(k)) {
                        unsigned j;

                        for (j = 0; j < KEY_PTRS(k); j++) {
                                struct bucket *g = PTR_BUCKET(c, k, j);
                                atomic_inc(&g->pin);

                                if (g->prio == BTREE_PRIO &&
                                    !ptr_stale(c, k, j))
                                        g->prio = INITIAL_PRIO;
                        }

                        __bch_btree_mark_key(c, 0, k);
                }
        }
}

int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
        int ret = 0, keys = 0, entries = 0;
        struct bkey *k;
        struct journal_replay *i =
                list_entry(list->prev, struct journal_replay, list);

        uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
        struct keylist keylist;
        struct btree_op op;

        bch_keylist_init(&keylist);
        bch_btree_op_init_stack(&op);
        op.lock = SHRT_MAX;
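
        /*
         * (Keys are replayed strictly one at a time: each is copied into
         * the single-entry keylist and inserted before moving on, so the
         * btree sees them in exactly the order they were journalled.)
         */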

        list_for_each_entry(i, list, list) {
                BUG_ON(i->pin && atomic_read(i->pin) != 1);

                cache_set_err_on(n != i->j.seq, s,
"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
                                 n, i->j.seq - 1, start, end);

                for (k = i->j.start;
                     k < end(&i->j);
                     k = bkey_next(k)) {
                        trace_bcache_journal_replay_key(k);

                        bkey_copy(keylist.top, k);
                        bch_keylist_push(&keylist);

                        ret = bch_btree_insert(&op, s, &keylist, i->pin);
                        if (ret)
                                goto err;

                        BUG_ON(!bch_keylist_empty(&keylist));
                        keys++;

                        cond_resched();
                }

                if (i->pin)
                        atomic_dec(i->pin);
                n = i->j.seq + 1;
                entries++;
        }

        pr_info("journal replay done, %i keys in %i entries, seq %llu",
                keys, entries, end);

        while (!list_empty(list)) {
                i = list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kfree(i);
        }
err:
        closure_sync(&op.cl);
        return ret;
}

/* Journalling */

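/*
 * (Writing out the btree node holding the oldest journal pin is how space
 * is reclaimed when the journal fills up: once the node is written, its
 * keys no longer depend on the journal, so the oldest bucket(s) can be
 * freed by journal_reclaim().)
 */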
static void btree_flush_write(struct cache_set *c)
{
        /*
         * Try to find the btree node that references the oldest journal
         * entry; best is our current candidate, and is locked if non-NULL:
         */
        struct btree *b, *best;
        unsigned i;
retry:
        best = NULL;

        for_each_cached_btree(b, c, i)
                if (btree_current_write(b)->journal) {
                        if (!best)
                                best = b;
                        else if (journal_pin_cmp(c,
                                        btree_current_write(best)->journal,
                                        btree_current_write(b)->journal)) {
                                best = b;
                        }
                }

        b = best;
        if (b) {
                rw_lock(true, b, b->level);

                if (!btree_current_write(b)->journal) {
                        rw_unlock(true, b);
                        /* We raced */
                        goto retry;
                }

                bch_btree_node_write(b, NULL);
                rw_unlock(true, b);
        }
}

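/*
 * (One pin is pushed per journal entry - see bch_journal_next() - so
 * fifo_used() is the number of entries still pinned, and j->seq minus
 * that plus 1 is the sequence number of the oldest pinned entry.)
 */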
#define last_seq(j)     ((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio, int error)
{
        struct journal_device *ja =
                container_of(bio, struct journal_device, discard_bio);
        struct cache *ca = container_of(ja, struct cache, journal);

        atomic_set(&ja->discard_in_flight, DISCARD_DONE);

        closure_wake_up(&ca->set->journal.wait);
        closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
        struct journal_device *ja =
                container_of(work, struct journal_device, discard_work);

        submit_bio(0, &ja->discard_bio);
}

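/*
 * (Discard state machine, as read off the switch below: DISCARD_READY
 * means no discard is outstanding and discard_idx may advance toward
 * last_idx; DISCARD_IN_FLIGHT means discard_bio has been submitted for
 * the bucket at discard_idx; DISCARD_DONE means it completed and the
 * index can be bumped before going READY again.)
 */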
static void do_journal_discard(struct cache *ca)
{
        struct journal_device *ja = &ca->journal;
        struct bio *bio = &ja->discard_bio;

        if (!ca->discard) {
                ja->discard_idx = ja->last_idx;
                return;
        }

        switch (atomic_read(&ja->discard_in_flight)) {
        case DISCARD_IN_FLIGHT:
                return;

        case DISCARD_DONE:
                ja->discard_idx = (ja->discard_idx + 1) %
                        ca->sb.njournal_buckets;

                atomic_set(&ja->discard_in_flight, DISCARD_READY);
                /* fallthrough */

        case DISCARD_READY:
                if (ja->discard_idx == ja->last_idx)
                        return;

                atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

                bio_init(bio);
                bio->bi_sector          = bucket_to_sector(ca->set,
                                                ca->sb.d[ja->discard_idx]);
                bio->bi_bdev            = ca->bdev;
                bio->bi_rw              = REQ_WRITE|REQ_DISCARD;
                bio->bi_max_vecs        = 1;
                bio->bi_io_vec          = bio->bi_inline_vecs;
                bio->bi_size            = bucket_bytes(ca);
                bio->bi_end_io          = journal_discard_endio;

                closure_get(&ca->set->cl);
                INIT_WORK(&ja->discard_work, journal_discard_work);
                schedule_work(&ja->discard_work);
        }
}

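/*
 * (journal_reclaim(): pop fully released pins off the front of the fifo,
 * advance each device's last_idx past buckets whose newest entry is older
 * than last_seq, kick off discards for the freed buckets, and - only once
 * the current bucket is out of free blocks - point journal.key at the
 * next bucket on each device.)
 */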
static void journal_reclaim(struct cache_set *c)
{
        struct bkey *k = &c->journal.key;
        struct cache *ca;
        uint64_t last_seq;
        unsigned iter, n = 0;
        atomic_t p;

        while (!atomic_read(&fifo_front(&c->journal.pin)))
                fifo_pop(&c->journal.pin, p);

        last_seq = last_seq(&c->journal);

        /* Update last_idx */

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;

                while (ja->last_idx != ja->cur_idx &&
                       ja->seq[ja->last_idx] < last_seq)
                        ja->last_idx = (ja->last_idx + 1) %
                                ca->sb.njournal_buckets;
        }

        for_each_cache(ca, c, iter)
                do_journal_discard(ca);

        if (c->journal.blocks_free)
                goto out;

        /*
         * Allocate:
         * XXX: Sort by free journal space
         */

        for_each_cache(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

                /* No space available on this device */
                if (next == ja->discard_idx)
                        continue;

                ja->cur_idx = next;
                k->ptr[n++] = PTR(0,
                                  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
                                  ca->sb.nr_this_dev);
        }

        bkey_init(k);
        SET_KEY_PTRS(k, n);

        if (n)
                c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
        if (!journal_full(&c->journal))
                __closure_wake_up(&c->journal.wait);
}

void bch_journal_next(struct journal *j)
{
        atomic_t p = { 1 };

        j->cur = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        /*
         * The fifo_push() needs to happen at the same time as j->seq is
         * incremented for last_seq() to be calculated correctly
         */
        BUG_ON(!fifo_push(&j->pin, p));
        atomic_set(&fifo_back(&j->pin), 1);

        j->cur->data->seq       = ++j->seq;
        j->cur->need_write      = false;
        j->cur->data->keys      = 0;

        if (fifo_full(&j->pin))
                pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio, int error)
{
        struct journal_write *w = bio->bi_private;

        cache_set_err_on(error, w->c, "journal io error");
        closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
        struct journal *j = container_of(cl, struct journal, io);
        struct journal_write *w = (j->cur == j->w)
                ? &j->w[1]
                : &j->w[0];

        __closure_wake_up(&w->wait);
        continue_at_nobarrier(cl, journal_write, system_wq);
}

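/*
 * (journal_write_unlocked() must be entered with journal.lock held and
 * always releases it: it either bails out because there is nothing to
 * write, retries after reclaiming when the journal is full, or fills in
 * the jset header - btree root, uuid bucket, prio buckets, last_seq,
 * checksum - and submits one bio per pointer in journal.key.)
 */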
static void journal_write_unlocked(struct closure *cl)
        __releases(c->journal.lock)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);
        struct cache *ca;
        struct journal_write *w = c->journal.cur;
        struct bkey *k = &c->journal.key;
        unsigned i, sectors = set_blocks(w->data, c) * c->sb.block_size;

        struct bio *bio;
        struct bio_list list;
        bio_list_init(&list);

        if (!w->need_write) {
                /*
                 * XXX: have to unlock closure before we unlock journal lock,
                 * else we race with bch_journal(). But this way we race
                 * against cache set unregister. Doh.
                 */
                set_closure_fn(cl, NULL, NULL);
                closure_sub(cl, CLOSURE_RUNNING + 1);
                spin_unlock(&c->journal.lock);
                return;
        } else if (journal_full(&c->journal)) {
                journal_reclaim(c);
                spin_unlock(&c->journal.lock);

                btree_flush_write(c);
                continue_at(cl, journal_write, system_wq);
        }

        c->journal.blocks_free -= set_blocks(w->data, c);

        w->data->btree_level = c->root->level;

        bkey_copy(&w->data->btree_root, &c->root->key);
        bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

        for_each_cache(ca, c, i)
                w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

        w->data->magic          = jset_magic(c);
        w->data->version        = BCACHE_JSET_VERSION;
        w->data->last_seq       = last_seq(&c->journal);
        w->data->csum           = csum_set(w->data);

        for (i = 0; i < KEY_PTRS(k); i++) {
                ca = PTR_CACHE(c, k, i);
                bio = &ca->journal.bio;

                atomic_long_add(sectors, &ca->meta_sectors_written);

                bio_reset(bio);
                bio->bi_sector  = PTR_OFFSET(k, i);
                bio->bi_bdev    = ca->bdev;
                bio->bi_rw      = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
                bio->bi_size    = sectors << 9;

                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
                bch_bio_map(bio, w->data);

                trace_bcache_journal_write(bio);
                bio_list_add(&list, bio);

                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

                ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
        }

        atomic_dec_bug(&fifo_back(&c->journal.pin));
        bch_journal_next(&c->journal);
        journal_reclaim(c);

        spin_unlock(&c->journal.lock);

        while ((bio = bio_list_pop(&list)))
                closure_bio_submit(bio, cl, c->cache[0]);

        continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, journal.io);

        spin_lock(&c->journal.lock);
        journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
        __releases(c->journal.lock)
{
        struct closure *cl = &c->journal.io;
        struct journal_write *w = c->journal.cur;

        w->need_write = true;

        if (closure_trylock(cl, &c->cl))
                journal_write_unlocked(cl);
        else
                spin_unlock(&c->journal.lock);
}

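/*
 * (journal_wait_for_write() loops until the current jset has room for
 * nkeys more keys, returning with journal.lock held. If only the current
 * entry is full, it kicks off the write and sleeps on w->wait; if the
 * whole journal is full, it reclaims, flushes a btree node and sleeps on
 * journal.wait.)
 */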
static struct journal_write *journal_wait_for_write(struct cache_set *c,
                                                    unsigned nkeys)
{
        size_t sectors;
        struct closure cl;

        closure_init_stack(&cl);

        spin_lock(&c->journal.lock);

        while (1) {
                struct journal_write *w = c->journal.cur;

                sectors = __set_blocks(w->data, w->data->keys + nkeys,
                                       c) * c->sb.block_size;

                if (sectors <= min_t(size_t,
                                     c->journal.blocks_free * c->sb.block_size,
                                     PAGE_SECTORS << JSET_BITS))
                        return w;

                /* XXX: tracepoint */
                if (!journal_full(&c->journal)) {
                        trace_bcache_journal_entry_full(c);

                        /*
                         * XXX: If we were inserting so many keys that they
                         * won't fit in an _empty_ journal write, we'll
                         * deadlock. For now, handle this in
                         * bch_keylist_realloc() - but something to think about.
                         */
                        BUG_ON(!w->data->keys);

                        closure_wait(&w->wait, &cl);
                        journal_try_write(c); /* unlocks */
                } else {
                        trace_bcache_journal_full(c);

                        closure_wait(&c->journal.wait, &cl);
                        journal_reclaim(c);
                        spin_unlock(&c->journal.lock);

                        btree_flush_write(c);
                }

                closure_sync(&cl);
                spin_lock(&c->journal.lock);
        }
}

static void journal_write_work(struct work_struct *work)
{
        struct cache_set *c = container_of(to_delayed_work(work),
                                           struct cache_set,
                                           journal.work);
        spin_lock(&c->journal.lock);
        journal_try_write(c);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then bch_journal()
 * hands those same keys off to btree_insert_async()
 */

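/*
 * (The atomic_t returned below is the journal pin for the entry the keys
 * landed in; the caller drops it once the keys are safely inserted in the
 * btree. bch_journal_meta() below journals an empty keylist - in effect
 * just forcing a journal write - and so drops the pin immediately.)
 */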
atomic_t *bch_journal(struct cache_set *c,
                      struct keylist *keys,
                      struct closure *parent)
{
        struct journal_write *w;
        atomic_t *ret;

        if (!CACHE_SYNC(&c->sb))
                return NULL;

        w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

        memcpy(end(w->data), keys->keys, bch_keylist_bytes(keys));
        w->data->keys += bch_keylist_nkeys(keys);

        ret = &fifo_back(&c->journal.pin);
        atomic_inc(ret);

        if (parent) {
                closure_wait(&w->wait, parent);
                journal_try_write(c);
        } else if (!w->need_write) {
                schedule_delayed_work(&c->journal.work,
                                      msecs_to_jiffies(c->journal_delay_ms));
                spin_unlock(&c->journal.lock);
        } else {
                spin_unlock(&c->journal.lock);
        }

        return ret;
}

void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
        struct keylist keys;
        atomic_t *ref;

        bch_keylist_init(&keys);

        ref = bch_journal(c, &keys, cl);
        if (ref)
                atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
        free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
        free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
        free_fifo(&c->journal.pin);
}

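/*
 * (Two jset buffers, w[0] and w[1], of 1 << JSET_BITS pages each: the
 * journal is double buffered, so one jset can be filled with new keys
 * while the other is in flight - see bch_journal_next() flipping j->cur.)
 */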
int bch_journal_alloc(struct cache_set *c)
{
        struct journal *j = &c->journal;

        closure_init_unlocked(&j->io);
        spin_lock_init(&j->lock);
        INIT_DELAYED_WORK(&j->work, journal_write_work);

        c->journal_delay_ms = 100;

        j->w[0].c = c;
        j->w[1].c = c;

        if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
            !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
            !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
                return -ENOMEM;

        return 0;
}