/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

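/*
 * Writeback cutoffs, compared against gc_stats.in_use (the percentage of
 * the cache in use): at or below CUTOFF_WRITEBACK any write may be cached
 * in writeback mode; between the two cutoffs only sync/metadata/priority
 * writes are; above CUTOFF_WRITEBACK_SYNC writeback caching is bypassed
 * entirely.  See should_writeback() below.
 */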
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

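/*
 * Per-pass batching limits for the writeback thread: at most
 * MAX_WRITEBACKS_IN_PASS contiguous dirty keys, totalling at most
 * MAX_WRITESIZE_IN_PASS 512-byte sectors, are issued in one pass before
 * the thread reschedules.
 */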
#define MAX_WRITEBACKS_IN_PASS  5
#define MAX_WRITESIZE_IN_PASS   5000	/* *512b */

/*
 * 14 (16384ths) is chosen here as something that each backing device
 * should be a reasonable fraction of the share, and not to blow up
 * until individual backing devices are a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT 14

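/*
 * Total dirty sectors on a bcache device, computed by summing its
 * per-stripe dirty counters.
 */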
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

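/*
 * Total dirty sectors across all flash-only volumes in the cache set.
 * bch_register_lock keeps c->devices[] stable while it is walked.
 */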
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->devices_max_used; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

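/*
 * Map a sector offset on the backing device to the index of the stripe
 * containing it.  do_div() is used so the 64-bit division also builds on
 * 32-bit architectures.
 */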
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

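/*
 * Return true if any stripe overlapped by the nr_sectors starting at
 * offset holds dirty data.
 */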
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

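/*
 * Decide whether a write bio should be cached in writeback mode.  Refuse
 * outright while detaching or once the cache is more than
 * CUTOFF_WRITEBACK_SYNC percent full; accept writes touching an
 * already-dirty stripe when partial stripe writes are expensive; honour
 * would_skip (the sequential-bypass decision) next; otherwise accept
 * sync/metadata/priority writes, or any write while the cache is at most
 * CUTOFF_WRITEBACK percent full.
 */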
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= CUTOFF_WRITEBACK);
}

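/* Wake the writeback thread, if it was created successfully. */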
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

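/*
 * Note that the device now has dirty data: on the 0 -> 1 transition of
 * has_dirty, take a reference on the cached_dev, persist BDEV_STATE_DIRTY
 * in the backing superblock, and kick the writeback thread.
 */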
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		refcount_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

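/* Implemented in writeback.c: */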
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif