/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

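/*
 * Thresholds on the percentage of the cache in use (gc_stats.in_use),
 * used by should_writeback() below: above CUTOFF_WRITEBACK only sync,
 * metadata and dirty-partial-stripe writes are still cached dirty;
 * above CUTOFF_WRITEBACK_SYNC nothing is.
 */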
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

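/*
 * Per-pass limits for the writeback thread: at most MAX_WRITEBACKS_IN_PASS
 * keys and MAX_WRITESIZE_IN_PASS 512-byte sectors are gathered before the
 * dirty keys are issued to the backing device.
 */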
#define MAX_WRITEBACKS_IN_PASS	5
#define MAX_WRITESIZE_IN_PASS	5000	/* *512b */

/*
 * 14 (i.e. shares expressed in 16384ths) is chosen so that each backing
 * device gets a reasonable fraction of the share, and the arithmetic does
 * not blow up until individual backing devices reach a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT	14

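/* Sum the dirty sector counts of all stripes on a bcache device. */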
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

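/*
 * Total dirty sectors across all flash-only volumes in the cache set;
 * holds bch_register_lock while walking c->devices.
 */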
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->devices_max_used; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

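/* Map a sector offset on a bcache device to its writeback stripe index. */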
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

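/*
 * True if any stripe covered by [offset, offset + nr_sectors) has dirty
 * sectors.
 */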
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

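/*
 * Decide whether a write should be cached dirty for later writeback:
 * never when not in writeback mode, when detaching, or when more than
 * CUTOFF_WRITEBACK_SYNC percent of the cache is in use; always when the
 * bio touches an already-dirty stripe and partial stripes are expensive;
 * otherwise (unless the write would have bypassed the cache anyway) for
 * sync/metadata requests or while cache use is at or below
 * CUTOFF_WRITEBACK.
 */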
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= CUTOFF_WRITEBACK);
}

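/* Wake the writeback thread, if one has been started for this device. */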
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

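/*
 * Note that the cached device now holds dirty data: take a reference on
 * dc, flag the backing superblock dirty (written out asynchronously) and
 * wake the writeback thread.
 */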
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		refcount_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif