/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

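/*
 * Walk a bio's segments and work out how many sectors the device will take
 * in a single request, honouring the queue's segment/size limits and its
 * merge_bvec_fn if it has one. Used below to decide where to split a bio.
 */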
static unsigned bch_bio_max_sectors(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned ret = 0, seg = 0;

	if (bio->bi_rw & REQ_DISCARD)
		return min(bio_sectors(bio), q->limits.max_discard_sectors);

	bio_for_each_segment(bv, bio, iter) {
		struct bvec_merge_data bvm = {
			.bi_bdev	= bio->bi_bdev,
			.bi_sector	= bio->bi_iter.bi_sector,
			.bi_size	= ret << 9,
			.bi_rw		= bio->bi_rw,
		};

		if (seg == min_t(unsigned, BIO_MAX_PAGES,
				 queue_max_segments(q)))
			break;

		if (q->merge_bvec_fn &&
		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
			break;

		seg++;
		ret += bv.bv_len >> 9;
	}

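	/*
	 * Clamp to the queue's max request size, but never return less than
	 * the first segment - the caller must always be able to make forward
	 * progress:
	 */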
	ret = min(ret, queue_max_sectors(q));

	WARN_ON(!ret);
	ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);

	return ret;
}

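/*
 * Runs once every fragment of a split bio has completed: restore the parent
 * bio's original completion, finish it, and free the split hook.
 */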
static void bch_bio_submit_split_done(struct closure *cl)
{
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	s->bio->bi_end_io = s->bi_end_io;
	s->bio->bi_private = s->bi_private;
	bio_endio(s->bio, 0);

	closure_debug_destroy(&s->cl);
	mempool_free(s, s->p->bio_split_hook);
}

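/*
 * Per-fragment completion: on error, clear BIO_UPTODATE on the parent so
 * the whole IO fails, then drop this fragment's refs. The final
 * closure_put() fires bch_bio_submit_split_done().
 */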
static void bch_bio_submit_split_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);

	if (error)
		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);

	bio_put(bio);
	closure_put(cl);
}

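/*
 * Like generic_make_request(), but if the bio is bigger than the device
 * will take in one request, split it and stitch the completions back
 * together with a closure.
 */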
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
	struct bio_split_hook *s;
	struct bio *n;

	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
		goto submit;

	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
		goto submit;

	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
	closure_init(&s->cl, NULL);

	s->bio		= bio;
	s->p		= p;
	s->bi_end_io	= bio->bi_end_io;
	s->bi_private	= bio->bi_private;
	bio_get(bio);

	do {
		n = bio_next_split(bio, bch_bio_max_sectors(bio),
				   GFP_NOIO, s->p->bio_split);

		n->bi_end_io	= bch_bio_submit_split_endio;
		n->bi_private	= &s->cl;

		closure_get(&s->cl);
		generic_make_request(n);
	} while (n != bio);

	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
	return;
submit:
	generic_make_request(bio);
}

/* Bios with headers */

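/*
 * A bbio is a bio with a bkey stapled to the front: the key names which
 * cache device and offset the IO belongs to, so it travels with the bio.
 */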
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio);
	bio->bi_flags		|= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs	 = bucket_pages(c);
	bio->bi_io_vec		 = bio->bi_inline_vecs;

	return bio;
}

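/*
 * Point the bio at the location named by the first pointer in its key and
 * submit it, recording the submit time for the congestion heuristics below.
 */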
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;

	b->submit_time_us = local_clock_us();
	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

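/*
 * Typical use, as a sketch (not a caller from this file; some_endio and cl
 * are stand-ins for whatever completion the caller owns):
 *
 *	struct bio *bio = bch_bbio_alloc(c);
 *
 *	bio->bi_iter.bi_size	= KEY_SIZE(k) << 9;
 *	bio->bi_end_io		= some_endio;
 *	bio->bi_private		= cl;
 *
 *	bch_submit_bbio(bio, c, k, ptr);
 */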
/* IO errors */

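/*
 * Count an IO error against a cache device. Errors decay over time, so old
 * errors are eventually forgiven; if the decayed count still exceeds
 * ca->set->error_limit, escalate to bch_cache_set_error().
 */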
void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
	/*
	 * Every error_decay ("refresh") IOs, the error count is scaled by
	 * 127/128, so after n refreshes a count of e becomes e * (127/128)^n.
	 * Solving (127/128)^n = 1/2 gives the halflife of an error:
	 *
	 *	log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 */

	if (ca->set->error_decay) {
		unsigned count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned errors;
			unsigned old = count;
			unsigned new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */

			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

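	/*
	 * io_errors is a fixed point number: the low IO_ERROR_SHIFT bits are
	 * fractional, so the 127/128 decay above doesn't immediately round
	 * small counts down to zero:
	 */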
	if (error) {
		char buf[BDEVNAME_SIZE];
		unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
						    &ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%s: IO error on %s, recovering",
			       bdevname(ca->bdev, buf), m);
		else
			bch_cache_set_error(ca->set,
					    "%s: too many IO errors %s",
					    bdevname(ca->bdev, buf), m);
	}
}

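/*
 * Error accounting plus a congestion heuristic: an IO that blows past the
 * read/write threshold drives c->congested negative in proportion to how
 * late it was, and IOs that complete in time step it back towards zero.
 */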
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      int error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = PTR_CACHE(c, &b->key, 0);

	unsigned threshold = bio->bi_rw & REQ_WRITE
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned t = local_clock_us();

		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;
			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, m);
}

void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    int error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}