/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

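/*
 * Thresholds on cache utilisation (gc_stats.in_use, in percent): past
 * CUTOFF_WRITEBACK_SYNC no writes are cached in writeback mode; between
 * the two cutoffs only sync/metadata writes are (see should_writeback()).
 */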
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

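/*
 * Limits on how much the writeback thread does in one pass: at most this
 * many keys, and at most this much data (in 512 byte sectors).
 */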
#define MAX_WRITEBACKS_IN_PASS	5
#define MAX_WRITESIZE_IN_PASS	5000	/* *512b */

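/* Default and maximum writeback rate update interval, in seconds */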
#define WRITEBACK_RATE_UPDATE_SECS_MAX		60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5

/*
 * WRITEBACK_SHARE_SHIFT of 14 (i.e. units of 16384ths) is chosen so that
 * each backing device gets a reasonable fraction of the share, and the
 * calculation does not blow up until individual backing devices reach a
 * petabyte.
 */
#define WRITEBACK_SHARE_SHIFT	14

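/* Total dirty sectors on a bcache device, summed over all its stripes */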
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

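/*
 * Total dirty sectors across all flash-only volumes in the cache set;
 * takes bch_register_lock while walking c->devices.
 */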
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->devices_max_used; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

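/* Map a sector offset on the backing device to its stripe index */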
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

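/*
 * True if any stripe overlapping [offset, offset + nr_sectors) already
 * holds dirty data.
 */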
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

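/*
 * Decide whether a write should go through the writeback cache: never
 * while detaching or past CUTOFF_WRITEBACK_SYNC; always when it overlaps
 * an already-dirty stripe and partial stripes are expensive; requests
 * that would be skipped anyway are not cached; otherwise cache sync and
 * metadata writes, or anything while in_use <= CUTOFF_WRITEBACK.
 */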
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= CUTOFF_WRITEBACK);
}

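/* Wake the per-device writeback thread, if one is running */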
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

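/*
 * Note that the device now holds dirty data: mark the superblock
 * BDEV_STATE_DIRTY (writing it out) and kick the writeback thread.
 */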
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

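/* Defined in writeback.c */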
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif