/* SPDX-License-Identifier: GPL-2.0 */
#ifndef WB_THROTTLE_H
#define WB_THROTTLE_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ktime.h>

#include "blk-stat.h"

enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */

	WBT_NR_BITS	= 3,	/* number of bits */
};

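/*
 * Two inflight accounting classes. Judging by the flags above, this is
 * presumably one queue for regular writeback and one for writes issued
 * by kswapd; the actual split lives in blk-wbt.c.
 */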
enum {
	WBT_NUM_RWQ	= 2,
};

/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,
	WBT_STATE_ON_MANUAL	= 2,
};

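/*
 * The helpers below stash a request's wbt_flags in the reserved result
 * bits of its blk_issue_stat, using the BLK_STAT_RES_MASK /
 * BLK_STAT_RES_SHIFT layout from blk-stat.h, presumably to avoid
 * growing struct request just for throttling bookkeeping.
 */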
static inline void wbt_clear_state(struct blk_issue_stat *stat)
{
	stat->stat &= ~BLK_STAT_RES_MASK;
}

static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
{
	return (stat->stat & BLK_STAT_RES_MASK) >> BLK_STAT_RES_SHIFT;
}

static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
{
	stat->stat |= ((u64) wb_acct) << BLK_STAT_RES_SHIFT;
}

static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
{
	return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_TRACKED;
}

static inline bool wbt_is_read(struct blk_issue_stat *stat)
{
	return (stat->stat >> BLK_STAT_RES_SHIFT) & WBT_READ;
}

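/*
 * One waitqueue/counter pair per throttling class: throttled writers
 * sleep on @wait, while @inflight counts their requests currently in
 * flight.
 */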
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */
	unsigned int wb_max;			/* max throughput writeback */
	int scale_step;
	bool scaled_max;

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct blk_stat_callback *cb;

	s64 sync_issue;
	void *sync_cookie;

	unsigned int wc;
	unsigned int queue_depth;

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;
	struct request_queue *queue;
	struct rq_wait rq_wait[WBT_NUM_RWQ];
};

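/* Sum of inflight throttled requests across all rq_wait classes. */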
static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
	unsigned int i, ret = 0;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		ret += atomic_read(&rwb->rq_wait[i].inflight);

	return ret;
}

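/*
 * With CONFIG_BLK_WBT disabled, every entry point below compiles down
 * to an empty stub, so callers in the block layer need no #ifdefs of
 * their own.
 */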
#ifdef CONFIG_BLK_WBT

void __wbt_done(struct rq_wb *, enum wbt_flags);
void wbt_done(struct rq_wb *, struct blk_issue_stat *);
enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
int wbt_init(struct request_queue *);
void wbt_exit(struct request_queue *);
void wbt_update_limits(struct rq_wb *);
void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
void wbt_disable_default(struct request_queue *);
void wbt_enable_default(struct request_queue *);

void wbt_set_queue_depth(struct rq_wb *, unsigned int);
void wbt_set_write_cache(struct rq_wb *, bool);

u64 wbt_default_latency_nsec(struct request_queue *);

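/*
 * Rough call sequence, as suggested by the declarations above (the
 * authoritative flow lives in blk-wbt.c and its callers):
 *
 *	flags = wbt_wait(rwb, bio, lock);	// may sleep to throttle
 *	wbt_track(&rq->issue_stat, flags);	// stash flags in the stat
 *	wbt_issue(rwb, &rq->issue_stat);	// request dispatched
 *	...
 *	wbt_done(rwb, &rq->issue_stat);		// completion, wakes waiters
 *
 * wbt_requeue() undoes the issue-side accounting if a request bounces
 * back to the queue before completing.
 */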
Jens Axboe | e34cbd3 | 2016-11-09 12:36:15 -0700 | [diff] [blame] | 128 | #else |
| 129 | |
| 130 | static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags) |
| 131 | { |
| 132 | } |
| 133 | static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat) |
| 134 | { |
| 135 | } |
| 136 | static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, |
| 137 | spinlock_t *lock) |
| 138 | { |
| 139 | return 0; |
| 140 | } |
Jens Axboe | 8054b89 | 2016-11-10 21:50:51 -0700 | [diff] [blame] | 141 | static inline int wbt_init(struct request_queue *q) |
Jens Axboe | e34cbd3 | 2016-11-09 12:36:15 -0700 | [diff] [blame] | 142 | { |
| 143 | return -EINVAL; |
| 144 | } |
| 145 | static inline void wbt_exit(struct request_queue *q) |
| 146 | { |
| 147 | } |
| 148 | static inline void wbt_update_limits(struct rq_wb *rwb) |
| 149 | { |
| 150 | } |
| 151 | static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat) |
| 152 | { |
| 153 | } |
| 154 | static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat) |
| 155 | { |
| 156 | } |
Jens Axboe | fa224ee | 2016-11-28 09:25:50 -0700 | [diff] [blame] | 157 | static inline void wbt_disable_default(struct request_queue *q) |
Jens Axboe | e34cbd3 | 2016-11-09 12:36:15 -0700 | [diff] [blame] | 158 | { |
| 159 | } |
Jan Kara | 8330cdb | 2017-04-19 11:33:27 +0200 | [diff] [blame] | 160 | static inline void wbt_enable_default(struct request_queue *q) |
| 161 | { |
| 162 | } |
Jens Axboe | e34cbd3 | 2016-11-09 12:36:15 -0700 | [diff] [blame] | 163 | static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth) |
| 164 | { |
| 165 | } |
| 166 | static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc) |
| 167 | { |
| 168 | } |
Jens Axboe | 80e091d | 2016-11-28 09:22:47 -0700 | [diff] [blame] | 169 | static inline u64 wbt_default_latency_nsec(struct request_queue *q) |
| 170 | { |
| 171 | return 0; |
| 172 | } |
Jens Axboe | e34cbd3 | 2016-11-09 12:36:15 -0700 | [diff] [blame] | 173 | |
| 174 | #endif /* CONFIG_BLK_WBT */ |
| 175 | |
| 176 | #endif |