#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

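/*
 * should_writeback() refuses writeback entirely above
 * CUTOFF_WRITEBACK_SYNC percent cache utilization; between the two
 * cutoffs only REQ_SYNC bios and partial-stripe writes still go to
 * writeback.
 */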
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

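/* Sum the per-stripe dirty counters for the device's total dirty sectors. */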
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

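/* Map a sector offset on the backing device to its stripe index. */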
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

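/*
 * Return true if any stripe covered by the nr_sectors starting at
 * offset already holds dirty data.
 */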
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

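/*
 * Decide whether a write should be cached in writeback mode: never
 * while detaching or above CUTOFF_WRITEBACK_SYNC percent utilization;
 * always for writes touching an already-dirty stripe when partial
 * stripe writes are expensive (e.g. a RAID backing device); otherwise
 * for REQ_SYNC bios or while utilization is at most CUTOFF_WRITEBACK
 * percent.
 */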
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return bio->bi_opf & REQ_SYNC ||
		in_use <= CUTOFF_WRITEBACK;
}

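/* Wake the writeback thread, if it was created successfully. */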
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

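/*
 * Mark the device as holding dirty data: the atomic_xchg() ensures a
 * single caller does the 0 -> 1 transition, which takes a ref on the
 * cached device, persists BDEV_STATE_DIRTY in the superblock and wakes
 * the writeback thread.
 */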
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

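/* Implemented in writeback.c: */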
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct cached_dev *dc);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif