#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

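/*
 * Thresholds on cache utilisation (gc_stats.in_use): above
 * CUTOFF_WRITEBACK_SYNC, should_writeback() refuses all writeback caching;
 * between the two cutoffs only REQ_SYNC writes (or writes to already-dirty
 * stripes, when partial stripe writes are expensive) are still cached.
 */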
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

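/*
 * Total dirty sectors on the device: the sum of the per-stripe dirty
 * sector counters.
 */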
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

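/*
 * Returns true if any stripe spanned by an I/O of nr_sectors starting in
 * the stripe containing offset has dirty sectors.
 */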
static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	uint64_t stripe = offset;

	do_div(stripe, d->stripe_size);

	while (1) {
		if (atomic_read(d->stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= d->stripe_size)
			return false;

		nr_sectors -= d->stripe_size;
		stripe++;
	}
}

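/*
 * Writeback is used only when the device is in writeback mode, is not
 * detaching, and the cache is below CUTOFF_WRITEBACK_SYNC utilisation.
 * Writes overlapping dirty stripes are always written back when partial
 * stripe writes are expensive; otherwise a write is cached only if it
 * would not be skipped and is either REQ_SYNC or below CUTOFF_WRITEBACK.
 */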
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    atomic_read(&dc->disk.detaching) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return bio->bi_rw & REQ_SYNC ||
		in_use <= CUTOFF_WRITEBACK;
}

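/* Wake the writeback thread to start flushing dirty data. */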
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	wake_up_process(dc->writeback_thread);
}

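/*
 * Note that the device now holds dirty data: on the 0 -> 1 transition of
 * has_dirty, take a reference on the cached_dev, persist BDEV_STATE_DIRTY
 * to the backing device's superblock and kick the writeback thread.
 */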
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

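/* Implemented in writeback.c: */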
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct cached_dev *dc);
int bch_cached_dev_writeback_init(struct cached_dev *);

#endif