blob: f47218d5b3b2081c81f92c40d3f4fc247f3f9242 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Jens Axboee34cbd32016-11-09 12:36:15 -07002#ifndef WB_THROTTLE_H
3#define WB_THROTTLE_H
4
5#include <linux/kernel.h>
6#include <linux/atomic.h>
7#include <linux/wait.h>
8#include <linux/timer.h>
9#include <linux/ktime.h>
10
11#include "blk-stat.h"
Josef Bacika7905042018-07-03 09:32:35 -060012#include "blk-rq-qos.h"
Jens Axboee34cbd32016-11-09 12:36:15 -070013
/*
 * Per-request flags. Individual bits, so a request can carry more than
 * one (e.g. a tracked write issued by kswapd).
 */
enum wbt_flags {
	WBT_TRACKED	= 1 << 0,	/* write, tracked for throttling */
	WBT_READ	= 1 << 1,	/* read */
	WBT_KSWAPD	= 1 << 2,	/* write, from kswapd */
	WBT_DISCARD	= 1 << 3,	/* discard */

	WBT_NR_BITS	= 4,		/* number of flag bits in use */
};
22
/* Indexes into rq_wb->rq_wait[]: one wait queue per issue class. */
enum {
	WBT_RWQ_BG	= 0,	/* background writeback */
	WBT_RWQ_KSWAPD	= 1,	/* writes from kswapd */
	WBT_RWQ_DISCARD	= 2,	/* discards */
	WBT_NUM_RWQ	= 3,	/* number of wait queues */
};
29
/*
 * Enable states. Throttling is either off entirely, switched on by
 * default at init time, or switched on manually through sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,	/* enabled at init time */
	WBT_STATE_ON_MANUAL	= 2,	/* enabled via sysfs */
};
38
/*
 * Per-queue writeback throttling state. Embedded in the queue's rq_qos
 * chain via @rqos (see RQWB() below to map back from the rq_qos).
 */
struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct blk_stat_callback *cb;		/* stats callback (blk-stat) */

	/* NOTE(review): presumably tracks one in-flight sync request for
	 * latency sampling — confirm against blk-wbt.c before relying on it. */
	u64 sync_issue;
	void *sync_cookie;

	unsigned int wc;			/* write cache state; see wbt_set_write_cache() */

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;		/* latency target; see wbt_set_min_lat() */
	struct rq_qos rqos;			/* rq_qos hook, used by RQWB() container_of */
	struct rq_wait rq_wait[WBT_NUM_RWQ];	/* per-class wait queues, WBT_RWQ_* indexed */
	struct rq_depth rq_depth;		/* queue depth scaling state (blk-rq-qos) */
};
71
/* Map an embedded rq_qos back to its containing rq_wb. */
static inline struct rq_wb *RQWB(struct rq_qos *rqos)
{
	return container_of(rqos, struct rq_wb, rqos);
}
76
Jens Axboee34cbd32016-11-09 12:36:15 -070077static inline unsigned int wbt_inflight(struct rq_wb *rwb)
78{
79 unsigned int i, ret = 0;
80
81 for (i = 0; i < WBT_NUM_RWQ; i++)
82 ret += atomic_read(&rwb->rq_wait[i].inflight);
83
84 return ret;
85}
86
Josef Bacika7905042018-07-03 09:32:35 -060087
Jens Axboee34cbd32016-11-09 12:36:15 -070088#ifdef CONFIG_BLK_WBT
89
Jens Axboe8054b892016-11-10 21:50:51 -070090int wbt_init(struct request_queue *);
Josef Bacika7905042018-07-03 09:32:35 -060091void wbt_update_limits(struct request_queue *);
Jens Axboefa224ee2016-11-28 09:25:50 -070092void wbt_disable_default(struct request_queue *);
Jan Kara8330cdb2017-04-19 11:33:27 +020093void wbt_enable_default(struct request_queue *);
Jens Axboee34cbd32016-11-09 12:36:15 -070094
Josef Bacika7905042018-07-03 09:32:35 -060095u64 wbt_get_min_lat(struct request_queue *q);
96void wbt_set_min_lat(struct request_queue *q, u64 val);
97
98void wbt_set_queue_depth(struct request_queue *, unsigned int);
99void wbt_set_write_cache(struct request_queue *, bool);
Jens Axboee34cbd32016-11-09 12:36:15 -0700100
Jens Axboe80e091d2016-11-28 09:22:47 -0700101u64 wbt_default_latency_nsec(struct request_queue *);
102
Jens Axboee34cbd32016-11-09 12:36:15 -0700103#else
104
Omar Sandovala8a45942018-05-09 02:08:48 -0700105static inline void wbt_track(struct request *rq, enum wbt_flags flags)
Omar Sandoval934031a2018-05-09 02:08:47 -0700106{
107}
Jens Axboe8054b892016-11-10 21:50:51 -0700108static inline int wbt_init(struct request_queue *q)
Jens Axboee34cbd32016-11-09 12:36:15 -0700109{
110 return -EINVAL;
111}
Josef Bacika7905042018-07-03 09:32:35 -0600112static inline void wbt_update_limits(struct request_queue *q)
Jens Axboee34cbd32016-11-09 12:36:15 -0700113{
114}
Jens Axboefa224ee2016-11-28 09:25:50 -0700115static inline void wbt_disable_default(struct request_queue *q)
Jens Axboee34cbd32016-11-09 12:36:15 -0700116{
117}
Jan Kara8330cdb2017-04-19 11:33:27 +0200118static inline void wbt_enable_default(struct request_queue *q)
119{
120}
Josef Bacika7905042018-07-03 09:32:35 -0600121static inline void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
Jens Axboee34cbd32016-11-09 12:36:15 -0700122{
123}
Josef Bacika7905042018-07-03 09:32:35 -0600124static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
125{
126}
127static inline u64 wbt_get_min_lat(struct request_queue *q)
128{
129 return 0;
130}
131static inline void wbt_set_min_lat(struct request_queue *q, u64 val)
Jens Axboee34cbd32016-11-09 12:36:15 -0700132{
133}
Jens Axboe80e091d2016-11-28 09:22:47 -0700134static inline u64 wbt_default_latency_nsec(struct request_queue *q)
135{
136 return 0;
137}
Jens Axboee34cbd32016-11-09 12:36:15 -0700138
139#endif /* CONFIG_BLK_WBT */
140
141#endif