#ifndef WB_THROTTLE_H
#define WB_THROTTLE_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ktime.h>

#include "blk-stat.h"

enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */

	WBT_NR_BITS	= 3,	/* number of bits */
};

enum {
	WBT_NUM_RWQ	= 2,
};

static inline void wbt_clear_state(struct blk_issue_stat *stat)
{
	stat->time &= BLK_STAT_TIME_MASK;
}

static inline enum wbt_flags wbt_stat_to_mask(struct blk_issue_stat *stat)
{
	return (stat->time & BLK_STAT_MASK) >> BLK_STAT_SHIFT;
}

static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags wb_acct)
{
	stat->time |= ((u64) wb_acct) << BLK_STAT_SHIFT;
}

static inline bool wbt_is_tracked(struct blk_issue_stat *stat)
{
	return (stat->time >> BLK_STAT_SHIFT) & WBT_TRACKED;
}

static inline bool wbt_is_read(struct blk_issue_stat *stat)
{
	return (stat->time >> BLK_STAT_SHIFT) & WBT_READ;
}
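
/*
 * A minimal sketch of the intended round trip (illustrative only): the
 * wbt_flags returned by wbt_wait() are stashed in the bits above
 * BLK_STAT_SHIFT of stat->time by wbt_track(), and recovered on the
 * completion side, e.g.
 *
 *	wbt_track(stat, WBT_TRACKED | WBT_KSWAPD);
 *	...
 *	if (wbt_is_tracked(stat))
 *		flags = wbt_stat_to_mask(stat);	[flags == WBT_TRACKED | WBT_KSWAPD]
 */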

struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */
	unsigned int wb_max;			/* max throughput writeback */
	int scale_step;				/* current depth scaling step */
	bool scaled_max;			/* depth scaled up as far as allowed */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct timer_list window_timer;		/* monitoring window timer */

	s64 sync_issue;				/* issue time of oldest outstanding sync request */
	void *sync_cookie;			/* cookie identifying that sync request */

	unsigned int wc;			/* write cache enabled */
	unsigned int queue_depth;		/* device queue depth */

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;		/* latency target, in nsec */
	struct request_queue *queue;
	struct rq_wait rq_wait[WBT_NUM_RWQ];	/* per write-class throttling queues */
};
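
/*
 * How the limits above are meant to relate (a sketch; the actual limit
 * selection lives in blk-wbt.c): wb_background <= wb_normal <= wb_max,
 * and a tracked write is throttled against one of them, roughly
 *
 *	if (current_is_kswapd())
 *		limit = rwb->wb_max;
 *	else if (write is background writeback)
 *		limit = rwb->wb_background;
 *	else
 *		limit = rwb->wb_normal;
 */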

static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
	unsigned int i, ret = 0;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		ret += atomic_read(&rwb->rq_wait[i].inflight);

	return ret;
}

#ifdef CONFIG_BLK_WBT

void __wbt_done(struct rq_wb *, enum wbt_flags);
void wbt_done(struct rq_wb *, struct blk_issue_stat *);
enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
int wbt_init(struct request_queue *);
void wbt_exit(struct request_queue *);
void wbt_update_limits(struct rq_wb *);
void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
void wbt_disable_default(struct request_queue *);

void wbt_set_queue_depth(struct rq_wb *, unsigned int);
void wbt_set_write_cache(struct rq_wb *, bool);

u64 wbt_default_latency_nsec(struct request_queue *);
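
/*
 * Typical call sequence (a sketch only, not a contract): the queue owner
 * calls wbt_init() at setup time and wbt_exit() on teardown; the submit
 * path throttles and tags a bio, and the dispatch/completion paths report
 * back, along the lines of
 *
 *	wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
 *	wbt_track(&rq->issue_stat, wb_acct);
 *	...
 *	wbt_issue(q->rq_wb, &rq->issue_stat);	[at dispatch]
 *	...
 *	wbt_done(q->rq_wb, &rq->issue_stat);	[at completion]
 */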

#else

static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
{
}
static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
				      spinlock_t *lock)
{
	return 0;
}
static inline int wbt_init(struct request_queue *q)
{
	return -EINVAL;
}
static inline void wbt_exit(struct request_queue *q)
{
}
static inline void wbt_update_limits(struct rq_wb *rwb)
{
}
static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_disable_default(struct request_queue *q)
{
}
static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
{
}
static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
{
}
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
{
	return 0;
}

#endif /* CONFIG_BLK_WBT */

#endif