#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

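/*
 * Writeback cutoffs, compared by should_writeback() against
 * gc_stats.in_use (cache utilisation, in percent): above
 * CUTOFF_WRITEBACK ordinary writes stop going through writeback,
 * and above CUTOFF_WRITEBACK_SYNC even sync writes do.
 */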
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

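/* Sum of the per-stripe dirty sector counters for one bcache device. */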
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

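/*
 * Total dirty sectors across all flash-only volumes in the cache set;
 * holds bch_register_lock while walking c->devices[].
 */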
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

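/* Map a sector offset to the index of the stripe containing it. */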
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

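/*
 * True if any stripe touched by the range
 * [offset, offset + nr_sectors) already has dirty sectors.
 */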
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

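/*
 * Decide whether a write should go through writeback caching.  Never
 * writeback when not in writeback mode, when detaching, or when the
 * cache is too full; always writeback writes that touch already-dirty
 * stripes when partial stripe writes are expensive on the backing
 * device (e.g. RAID 5/6); otherwise writeback sync requests and, while
 * utilisation stays at or below CUTOFF_WRITEBACK, anything that wasn't
 * going to be skipped anyway.
 */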
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
}

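/* Wake the writeback thread, if it was successfully created. */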
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

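/*
 * Note that the device now holds dirty data: take a ref on the
 * cached_dev, mark the superblock dirty (written out asynchronously,
 * see the XXX below), and kick the writeback thread.  The atomic_xchg()
 * ensures only the first writer to dirty the device does this.
 */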
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

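/* Implemented in writeback.c: */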
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct cached_dev *dc);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif