blob: 601c96a62b30a66f0b9a89626e399e2b4390196c [file] [log] [blame]
Kent Overstreetcafe5632013-03-23 16:11:31 -07001/*
2 * Moving/copying garbage collector
3 *
4 * Copyright 2012 Google, Inc.
5 */
6
7#include "bcache.h"
8#include "btree.h"
9#include "debug.h"
10#include "request.h"
11
Kent Overstreetc37511b2013-04-26 15:39:55 -070012#include <trace/events/bcache.h>
13
/*
 * State for one in-flight copy: allocated in read_moving() with trailing
 * space for the bio_vecs, freed by moving_io_destructor().
 */
struct moving_io {
	struct keybuf_key *w;	/* key being moved; w->private points back at this io */
	struct search s;	/* read/insert state machine; s.cl is this io's closure */
	struct bbio bio;	/* bio (+ cached key) reused for the read then the write */
};
19
20static bool moving_pred(struct keybuf *buf, struct bkey *k)
21{
22 struct cache_set *c = container_of(buf, struct cache_set,
23 moving_gc_keys);
24 unsigned i;
25
26 for (i = 0; i < KEY_PTRS(k); i++) {
27 struct cache *ca = PTR_CACHE(c, k, i);
28 struct bucket *g = PTR_BUCKET(c, k, i);
29
30 if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
31 return true;
32 }
33
34 return false;
35}
36
37/* Moving GC - IO loop */
38
39static void moving_io_destructor(struct closure *cl)
40{
41 struct moving_io *io = container_of(cl, struct moving_io, s.cl);
42 kfree(io);
43}
44
/*
 * Teardown after the write (or after a failed read skipped the write):
 * release the data pages, drop the key from the keybuf, release our
 * in-flight slot, and free the io via moving_io_destructor().
 */
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
	struct bio *bio = &io->bio.bio;
	struct bio_vec *bv;
	int i;

	/* Free the pages allocated for this move in read_moving() */
	bio_for_each_segment_all(bv, bio, i)
		__free_page(bv->bv_page);

	/* Someone else rewrote the data while we were moving it */
	if (io->s.insert_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->s.c->moving_gc_keys, io->w);

	/* Release the throttle slot taken by down() in read_moving() */
	up(&io->s.c->moving_in_flight);

	closure_return_with_destructor(cl, moving_io_destructor);
}
64
65static void read_moving_endio(struct bio *bio, int error)
66{
67 struct moving_io *io = container_of(bio->bi_private,
68 struct moving_io, s.cl);
69
70 if (error)
71 io->s.error = error;
72
Kent Overstreetc18536a2013-07-24 17:44:17 -070073 bch_bbio_endio(io->s.c, bio, error, "reading data to move");
Kent Overstreetcafe5632013-03-23 16:11:31 -070074}
75
76static void moving_init(struct moving_io *io)
77{
78 struct bio *bio = &io->bio.bio;
79
80 bio_init(bio);
81 bio_get(bio);
82 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
83
84 bio->bi_size = KEY_SIZE(&io->w->key) << 9;
85 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
86 PAGE_SECTORS);
87 bio->bi_private = &io->s.cl;
88 bio->bi_io_vec = bio->bi_inline_vecs;
Kent Overstreet169ef1c2013-03-28 12:50:55 -060089 bch_bio_map(bio, NULL);
Kent Overstreetcafe5632013-03-23 16:11:31 -070090}
91
/*
 * Write half of a move: runs after the read submitted by
 * read_moving_submit() completed.  If the read succeeded, reuse the bio
 * (now holding the data) to reinsert the key via bch_data_insert(),
 * then continue to write_moving_finish() for cleanup.
 */
static void write_moving(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct moving_io *io = container_of(s, struct moving_io, s);

	if (!s->error) {
		/* Reset the bio - the read consumed it */
		moving_init(io);

		io->bio.bio.bi_sector = KEY_START(&io->w->key);
		s->op.lock = -1;
		s->write_prio = 1;
		s->cache_bio = &io->bio.bio;

		/* Carry the original key's dirty/csum state across the move */
		s->writeback = KEY_DIRTY(&io->w->key);
		s->csum = KEY_CSUM(&io->w->key);

		/*
		 * Insert as a compare-and-replace of the original key so a
		 * racing foreground write wins; a lost race shows up as
		 * insert_collision in write_moving_finish().
		 */
		bkey_copy(&s->replace_key, &io->w->key);
		s->replace = true;

		closure_init(&s->btree, cl);
		bch_data_insert(&s->btree);
	}

	/* On read error, skip the write and go straight to cleanup */
	continue_at(cl, write_moving_finish, system_wq);
}
117
118static void read_moving_submit(struct closure *cl)
119{
120 struct search *s = container_of(cl, struct search, cl);
121 struct moving_io *io = container_of(s, struct moving_io, s);
122 struct bio *bio = &io->bio.bio;
123
Kent Overstreetc18536a2013-07-24 17:44:17 -0700124 bch_submit_bbio(bio, s->c, &io->w->key, 0);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700125
Kent Overstreet72a44512013-10-24 17:19:26 -0700126 continue_at(cl, write_moving, system_wq);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700127}
128
/*
 * Main moving GC loop: scan moving_gc_keys for keys whose buckets fall
 * below the per-cache gc_move_threshold (moving_pred), and kick off a
 * read + reinsert for each, throttled by the moving_in_flight
 * semaphore.  Returns once the keybuf is exhausted or the cache set is
 * stopping, after waiting for all in-flight moves to complete.
 */
static void read_moving(struct cache_set *c)
{
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;
	struct closure cl;

	closure_init_stack(&cl);

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		/* Trailing space for the bio_vecs set up by moving_init() */
		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private = io;
		io->w = w;
		io->s.inode = KEY_INODE(&w->key);
		io->s.c = c;

		moving_init(io);
		bio = &io->bio.bio;

		bio->bi_rw = READ;
		bio->bi_end_io = read_moving_endio;

		if (bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		/* Throttle; released in write_moving_finish() */
		down(&c->moving_in_flight);
		closure_call(&io->s.cl, read_moving_submit, NULL, &cl);
	}

	if (0) {
		/*
		 * Shared error path for both allocation failures above.
		 * NOTE(review): on kzalloc failure w->private was never
		 * assigned in this iteration - this relies on the keybuf
		 * handing out keys whose ->private is NULL (or IS_ERR);
		 * verify against bch_keybuf_* before trusting the kfree.
		 */
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	/* Wait for every move kicked off above to finish */
	closure_sync(&cl);
}
181
Kent Overstreetb1a67b02013-03-25 11:46:44 -0700182static bool bucket_cmp(struct bucket *l, struct bucket *r)
183{
184 return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
185}
186
187static unsigned bucket_heap_top(struct cache *ca)
188{
189 return GC_SECTORS_USED(heap_peek(&ca->heap));
190}
191
/*
 * Recompute each cache's gc_move_threshold, then copy data out of
 * sparsely used buckets.  Per cache: build a heap of the least-occupied
 * non-empty buckets, trim it until the data to move fits in (half of)
 * the free buckets, set the threshold to the fullest bucket kept, and
 * let read_moving() relocate every key pointing below that threshold.
 */
void bch_moving_gc(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->copy_gc_enabled)
		return;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i) {
		unsigned sectors_to_move = 0;
		/* Only move what fits in half the currently free buckets */
		unsigned reserve_sectors = ca->sb.bucket_size *
			min(fifo_used(&ca->free), ca->free.size / 2);

		ca->heap.used = 0;

		for_each_bucket(b, ca) {
			/* Empty buckets need no copying */
			if (!GC_SECTORS_USED(b))
				continue;

			if (!heap_full(&ca->heap)) {
				sectors_to_move += GC_SECTORS_USED(b);
				heap_add(&ca->heap, b, bucket_cmp);
			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
				/* b is emptier than the fullest kept bucket */
				sectors_to_move -= bucket_heap_top(ca);
				sectors_to_move += GC_SECTORS_USED(b);

				ca->heap.data[0] = b;
				heap_sift(&ca->heap, 0, bucket_cmp);
			}
		}

		/* Drop the fullest candidates until the move fits the reserve */
		while (sectors_to_move > reserve_sectors) {
			heap_pop(&ca->heap, b, bucket_cmp);
			sectors_to_move -= GC_SECTORS_USED(b);
		}

		/*
		 * NOTE(review): if every bucket had zero live sectors the
		 * heap is empty here and heap_peek() inside
		 * bucket_heap_top() returns NULL - confirm the empty-heap
		 * case cannot dereference it.
		 */
		ca->gc_move_threshold = bucket_heap_top(ca);

		pr_debug("threshold %u", ca->gc_move_threshold);
	}

	mutex_unlock(&c->bucket_lock);

	/* Restart the keybuf scan from the beginning of the keyspace */
	c->moving_gc_keys.last_scanned = ZERO_KEY;

	read_moving(c);
}
242
243void bch_moving_init_cache_set(struct cache_set *c)
244{
Kent Overstreet72c27062013-06-05 06:24:39 -0700245 bch_keybuf_init(&c->moving_gc_keys);
Kent Overstreet72a44512013-10-24 17:19:26 -0700246 sema_init(&c->moving_in_flight, 64);
Kent Overstreetcafe5632013-03-23 16:11:31 -0700247}