/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

struct moving_io {
	struct keybuf_key	*w;
	struct search		s;
	struct bbio		bio;
};

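/*
 * Pick keys for moving GC: returns true if any pointer in @k lands in a
 * bucket whose live data (GC_SECTORS_USED) is below that cache's
 * gc_move_threshold, i.e. the extent is worth copying out so its bucket
 * can be reclaimed.
 */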
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct cache *ca = PTR_CACHE(c, k, i);
		struct bucket *g = PTR_BUCKET(c, k, i);

		if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
			return true;
	}

	return false;
}

/* Moving GC - IO loop */

static void moving_io_destructor(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
	kfree(io);
}

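/*
 * Write-side completion: free the data pages, trace a collision if the
 * BTREE_REPLACE insert lost a race with a concurrent update, drop the
 * key from the keybuf, and release our in-flight slot (waking
 * read_moving() if it was throttled).
 */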
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, s.cl);
	struct bio *bio = &io->bio.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i)
		__free_page(bv->bv_page);

	if (io->s.op.insert_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);

	atomic_dec_bug(&io->s.op.c->in_flight);
	closure_wake_up(&io->s.op.c->moving_gc_wait);

	closure_return_with_destructor(cl, moving_io_destructor);
}

static void read_moving_endio(struct bio *bio, int error)
{
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, s.cl);

	if (error)
		io->s.error = error;

	bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
}

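/*
 * (Re)initialize the embedded bio to span exactly the extent in the key
 * being moved; moving GC I/O runs at idle priority so it yields to
 * foreground traffic.
 */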
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio);
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_size		= KEY_SIZE(&io->w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
					       PAGE_SECTORS);
	bio->bi_private		= &io->s.cl;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

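/*
 * Write side: reinsert the data we just read at its new location. Using
 * BTREE_REPLACE with the original key makes the insert fail (an "insert
 * collision") if the extent changed while the move was in flight,
 * instead of clobbering newer data.
 */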
static void write_moving(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct moving_io *io = container_of(s, struct moving_io, s);

	if (!s->error) {
		moving_init(io);

		io->bio.bio.bi_sector	= KEY_START(&io->w->key);
		s->op.lock		= -1;
		s->op.write_prio	= 1;
		s->op.cache_bio		= &io->bio.bio;

		s->writeback		= KEY_DIRTY(&io->w->key);
		s->op.csum		= KEY_CSUM(&io->w->key);

		s->op.type = BTREE_REPLACE;
		bkey_copy(&s->op.replace, &io->w->key);

		closure_init(&s->op.cl, cl);
		bch_insert_data(&s->op.cl);
	}

	continue_at(cl, write_moving_finish, NULL);
}

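/*
 * Submit the read of the extent; on completion the closure continues in
 * write_moving() on the GC workqueue.
 */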
static void read_moving_submit(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct moving_io *io = container_of(s, struct moving_io, s);
	struct bio *bio = &io->bio.bio;

	bch_submit_bbio(bio, s->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, bch_gc_wq);
}

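/*
 * Main moving GC loop: scan moving_gc_keys for extents accepted by
 * moving_pred(), allocate a moving_io per extent, read the data and
 * hand off to write_moving(). At most 64 moves are kept in flight;
 * past that we sleep on moving_gc_wait and reschedule.
 */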
static void read_moving(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, moving_gc);
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->w		= w;
		io->s.op.inode	= KEY_INODE(&w->key);
		io->s.op.c	= c;

		moving_init(io);
		bio = &io->bio.bio;

		bio->bi_rw	= READ;
		bio->bi_end_io	= read_moving_endio;

		if (bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);

		if (atomic_inc_return(&c->in_flight) >= 64) {
			closure_wait_event(&c->moving_gc_wait, cl,
					   atomic_read(&c->in_flight) < 64);
			continue_at(cl, read_moving, bch_gc_wq);
		}
	}

	if (0) {
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_return(cl);
}

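/*
 * Note: with this comparator the heap macros keep the bucket with the
 * most used sectors at data[0], so bucket_heap_top() reports the
 * fullest bucket currently in the heap.
 */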
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned bucket_heap_top(struct cache *ca)
{
	return GC_SECTORS_USED(heap_peek(&ca->heap));
}

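/*
 * Entry point, run from the GC closure: for each cache build a heap of
 * the emptiest non-empty buckets, pop the fullest until the remaining
 * live data fits in (up to) half the free list, then set
 * gc_move_threshold to the fullest bucket left so moving_pred() selects
 * everything emptier than it.
 */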
void bch_moving_gc(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, gc.cl);
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->copy_gc_enabled)
		closure_return(cl);

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i) {
		unsigned sectors_to_move = 0;
		unsigned reserve_sectors = ca->sb.bucket_size *
			min(fifo_used(&ca->free), ca->free.size / 2);

		ca->heap.used = 0;

		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				continue;

			if (!heap_full(&ca->heap)) {
				sectors_to_move += GC_SECTORS_USED(b);
				heap_add(&ca->heap, b, bucket_cmp);
			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
				sectors_to_move -= bucket_heap_top(ca);
				sectors_to_move += GC_SECTORS_USED(b);

				ca->heap.data[0] = b;
				heap_sift(&ca->heap, 0, bucket_cmp);
			}
		}

		while (sectors_to_move > reserve_sectors) {
			heap_pop(&ca->heap, b, bucket_cmp);
			sectors_to_move -= GC_SECTORS_USED(b);
		}

		ca->gc_move_threshold = bucket_heap_top(ca);

		pr_debug("threshold %u", ca->gc_move_threshold);
	}

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	closure_init(&c->moving_gc, cl);
	read_moving(&c->moving_gc);

	closure_return(cl);
}

void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
}