/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

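/*
 * Cutoffs on gc_stats.in_use (percentage of cache in use): above
 * CUTOFF_WRITEBACK_SYNC, writeback caching is refused outright; between
 * the two values it is mostly limited to sync/metadata/priority writes.
 * See should_writeback() below for the exact rules.
 */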
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

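/*
 * Per-pass batching caps for the writeback thread (used in writeback.c):
 * at most MAX_WRITEBACKS_IN_PASS operations, totalling no more than
 * MAX_WRITESIZE_IN_PASS 512-byte sectors, before the delay is reevaluated.
 */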
#define MAX_WRITEBACKS_IN_PASS	5
#define MAX_WRITESIZE_IN_PASS	5000	/* *512b */

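/*
 * Bounds for the writeback_rate_update_seconds sysfs knob, which
 * controls how often the writeback rate is recalculated: 5s by
 * default, capped at 60s.
 */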
#define WRITEBACK_RATE_UPDATE_SECS_MAX		60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5

/*
 * A share shift of 14 (i.e. units of 1/16384) is chosen so that each
 * backing device gets a reasonable fraction of the share, and the
 * arithmetic does not blow up until individual backing devices reach
 * a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT	14

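/* Sum the per-stripe dirty counters: total dirty sectors on one device. */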
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

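/*
 * Total dirty sectors across every flash-only volume in the cache set.
 * bch_register_lock keeps c->devices stable while we walk it.
 */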
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->devices_max_used; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

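/* Map a sector offset on the backing device to its stripe index. */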
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

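/*
 * True if any stripe touched by the nr_sectors starting at offset
 * already has dirty data.
 */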
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

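/*
 * Decide whether a write bio should be cached in writeback mode.  Refuse
 * if the device is not in writeback mode, is detaching, or the cache is
 * more than CUTOFF_WRITEBACK_SYNC percent full.  Prefer writeback when
 * the bio overlaps a dirty stripe and partial stripe writes are
 * expensive; otherwise accept sync/metadata/priority bios, or anything
 * while usage is at or below CUTOFF_WRITEBACK.
 */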
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= CUTOFF_WRITEBACK);
}

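/* Wake the writeback kthread, if it was created successfully. */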
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

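/*
 * Note that new dirty data exists: on the 0 -> 1 transition of
 * has_dirty, persist BDEV_STATE_DIRTY in the backing superblock and
 * kick the writeback thread.
 */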
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif