/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

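/*
 * How many sectors, from the front of @bio, the underlying queue can
 * take in a single request: walk the segments until we hit the queue's
 * segment limit (or BIO_MAX_PAGES) or the driver's merge_bvec_fn
 * refuses the next one, then clamp to queue_max_sectors(). Discards are
 * bounded only by max_discard_sectors. Always returns at least the size
 * of the first bvec, so the caller can make forward progress.
 */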
static unsigned bch_bio_max_sectors(struct bio *bio)
{
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned ret = 0, seg = 0;

        if (bio->bi_rw & REQ_DISCARD)
                return min(bio_sectors(bio), q->limits.max_discard_sectors);

        bio_for_each_segment(bv, bio, iter) {
                struct bvec_merge_data bvm = {
                        .bi_bdev        = bio->bi_bdev,
                        .bi_sector      = bio->bi_iter.bi_sector,
                        .bi_size        = ret << 9,
                        .bi_rw          = bio->bi_rw,
                };

                if (seg == min_t(unsigned, BIO_MAX_PAGES,
                                 queue_max_segments(q)))
                        break;

                if (q->merge_bvec_fn &&
                    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
                        break;

                seg++;
                ret += bv.bv_len >> 9;
        }

        ret = min(ret, queue_max_sectors(q));

        WARN_ON(!ret);
        ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);

        return ret;
}

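/*
 * Bios too big for one request are carved up by bch_generic_make_request()
 * below. The bio_split_hook saves the original bio's bi_end_io and
 * bi_private, and its closure counts the outstanding fragments so the
 * original bio completes only after all of them have.
 */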
static void bch_bio_submit_split_done(struct closure *cl)
{
        struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

        s->bio->bi_end_io = s->bi_end_io;
        s->bio->bi_private = s->bi_private;
        bio_endio(s->bio, 0);

        closure_debug_destroy(&s->cl);
        mempool_free(s, s->p->bio_split_hook);
}

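/*
 * Per-fragment completion: on error, clear BIO_UPTODATE on the original
 * bio, then drop this fragment and its ref on the split closure.
 */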
static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
        struct closure *cl = bio->bi_private;
        struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

        if (error)
                clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

        bio_put(bio);
        closure_put(cl);
}

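/*
 * Submit @bio, splitting it if it is bigger than the device will accept
 * in one request. Each fragment holds a ref on the hook's closure; once
 * the last fragment completes, bch_bio_submit_split_done() restores the
 * original bi_end_io/bi_private and completes @bio.
 */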
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
        struct bio_split_hook *s;
        struct bio *n;

        if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
                goto submit;

        if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
                goto submit;

        s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
        closure_init(&s->cl, NULL);

        s->bio = bio;
        s->p = p;
        s->bi_end_io = bio->bi_end_io;
        s->bi_private = bio->bi_private;
        bio_get(bio);

        do {
                n = bio_next_split(bio, bch_bio_max_sectors(bio),
                                   GFP_NOIO, s->p->bio_split);

                n->bi_end_io = bch_bio_submit_split_endio;
                n->bi_private = &s->cl;

                closure_get(&s->cl);
                generic_make_request(n);
        } while (n != bio);

        continue_at(&s->cl, bch_bio_submit_split_done, NULL);
        return;
submit:
        generic_make_request(bio);
}

/* Bios with headers */

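/*
 * A bbio bundles a bio with the bkey it is doing IO to and a submit
 * timestamp, so completion code can get back to the cache device and
 * key. They come from the cache set's bio_meta mempool, with enough
 * inline bvecs for a full bucket.
 */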
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
        struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
        struct bio *bio = &b->bio;

        bio_init(bio);
        bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
        bio->bi_max_vecs = bucket_pages(c);
        bio->bi_io_vec = bio->bi_inline_vecs;

        return bio;
}

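/* Point the bio at the device and offset named by the key's first pointer */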
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
        bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;

        b->submit_time_us = local_clock_us();
        closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
                     struct bkey *k, unsigned ptr)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        bch_bkey_copy_single_ptr(&b->key, k, ptr);
        __bch_submit_bbio(bio, c);
}

/* IO errors */

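/*
 * Errors are counted with exponential decay: every error_decay IOs the
 * accumulated count is scaled by 127/128, and new errors are added with
 * fixed-point precision (IO_ERROR_SHIFT). Once the decayed count reaches
 * error_limit, the whole cache set is flagged as failed.
 */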
void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
        /*
         * The halflife of an error is:
         * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
         */
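        /*
         * That is: each refresh period (error_decay IOs) multiplies the
         * error count by 127/128, so after n periods it has been scaled
         * by (127/128)^n, which reaches 1/2 at
         * n = log(1/2)/log(127/128) ~= 88.4.
         */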

        if (ca->set->error_decay) {
                unsigned count = atomic_inc_return(&ca->io_count);

                while (count > ca->set->error_decay) {
                        unsigned errors;
                        unsigned old = count;
                        unsigned new = count - ca->set->error_decay;

                        /*
                         * First we subtract refresh from count; each time we
                         * successfully do so, we rescale the errors once:
                         */

                        count = atomic_cmpxchg(&ca->io_count, old, new);

                        if (count == old) {
                                count = new;

                                errors = atomic_read(&ca->io_errors);
                                do {
                                        old = errors;
                                        new = ((uint64_t) errors * 127) / 128;
                                        errors = atomic_cmpxchg(&ca->io_errors,
                                                                old, new);
                                } while (old != errors);
                        }
                }
        }

        if (error) {
                char buf[BDEVNAME_SIZE];
                unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
                                                    &ca->io_errors);
                errors >>= IO_ERROR_SHIFT;

                if (errors < ca->set->error_limit)
                        pr_err("%s: IO error on %s, recovering",
                               bdevname(ca->bdev, buf), m);
                else
                        bch_cache_set_error(ca->set,
                                            "%s: too many IO errors %s",
                                            bdevname(ca->bdev, buf), m);
        }
}

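/*
 * Besides counting errors, IO latency feeds the cache set's congestion
 * estimate: IOs slower than the configured read/write threshold drive
 * c->congested negative (more congested); while it is negative, each
 * fast-enough IO nudges it back toward zero.
 */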
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
                              int error, const char *m)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct cache *ca = PTR_CACHE(c, &b->key, 0);

        unsigned threshold = bio->bi_rw & REQ_WRITE
                ? c->congested_write_threshold_us
                : c->congested_read_threshold_us;

        if (threshold) {
                unsigned t = local_clock_us();

                int us = t - b->submit_time_us;
                int congested = atomic_read(&c->congested);

                if (us > (int) threshold) {
                        int ms = us / 1024;
                        c->congested_last_us = t;

                        ms = min(ms, CONGESTED_MAX + congested);
                        atomic_sub(ms, &c->congested);
                } else if (congested < 0)
                        atomic_inc(&c->congested);
        }

        bch_count_io_errors(ca, error, m);
}

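/*
 * Common endio for bbios: account errors and latency, then release the
 * bio and put the closure that was waiting on it.
 */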
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
                    int error, const char *m)
{
        struct closure *cl = bio->bi_private;

        bch_bbio_count_io_errors(c, bio, error, m);
        bio_put(bio);
        closure_put(cl);
}