blob: 32b02efbfa66dda638a02d07aa00c63df48a5dab [file] [log] [blame]
Josef Bacika7905042018-07-03 09:32:35 -06001#ifndef RQ_QOS_H
2#define RQ_QOS_H
3
4#include <linux/kernel.h>
5#include <linux/blkdev.h>
6#include <linux/blk_types.h>
7#include <linux/atomic.h>
8#include <linux/wait.h>
9
/* Identifies which QoS policy a struct rq_qos instance implements. */
enum rq_qos_id {
	RQ_QOS_WBT,	/* writeback throttling (see wbt_rq_qos()) */
	RQ_QOS_CGROUP,	/* cgroup-based policy (see blkcg_rq_qos()) */
};
14
/*
 * A waitqueue paired with an in-flight counter; used by policies to put
 * submitters to sleep until the in-flight count drops below a limit
 * (see rq_wait_inc_below()).
 */
struct rq_wait {
	wait_queue_head_t wait;	/* tasks waiting for inflight to drop */
	atomic_t inflight;	/* requests currently outstanding */
};
19
/*
 * One registered QoS policy on a request queue.  Policies form a singly
 * linked list hanging off q->rq_qos; see rq_qos_add()/rq_qos_del().
 */
struct rq_qos {
	struct rq_qos_ops *ops;		/* policy callbacks */
	struct request_queue *q;	/* owning queue */
	enum rq_qos_id id;		/* which policy this instance is */
	struct rq_qos *next;		/* next policy on the queue's list */
};
26
/*
 * Callbacks a QoS policy may implement.  The dispatch wrappers declared
 * at the bottom of this header invoke these for every policy on the
 * queue's list.
 * NOTE(review): presumably any callback may be left NULL and is skipped
 * by the dispatchers — confirm against blk-rq-qos.c.
 */
struct rq_qos_ops {
	/* called at bio submission; may sleep to throttle the submitter */
	void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
	/* associate a bio's accounting state with the request it rode in on */
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	/* request is being issued to the driver */
	void (*issue)(struct rq_qos *, struct request *);
	/* request was requeued after issue */
	void (*requeue)(struct rq_qos *, struct request *);
	/* request completed */
	void (*done)(struct rq_qos *, struct request *);
	/* bio completed */
	void (*done_bio)(struct rq_qos *, struct bio *);
	/* bio is being torn down without completing normally */
	void (*cleanup)(struct rq_qos *, struct bio *);
	/* policy is being removed from the queue */
	void (*exit)(struct rq_qos *);
};
37
/*
 * Queue-depth scaling state, manipulated by rq_depth_scale_up()/
 * rq_depth_scale_down()/rq_depth_calc_max_depth().
 */
struct rq_depth {
	unsigned int max_depth;		/* current computed depth limit */

	/* scaling position; sign convention not visible in this header —
	 * TODO confirm against rq_depth_calc_max_depth() in blk-rq-qos.c */
	int scale_step;
	bool scaled_max;		/* whether max_depth was clamped while scaling */

	unsigned int queue_depth;	/* device/queue depth being scaled against */
	unsigned int default_depth;	/* depth to fall back to */
};
47
48static inline struct rq_qos *rq_qos_id(struct request_queue *q,
49 enum rq_qos_id id)
50{
51 struct rq_qos *rqos;
52 for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
53 if (rqos->id == id)
54 break;
55 }
56 return rqos;
57}
58
59static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
60{
61 return rq_qos_id(q, RQ_QOS_WBT);
62}
63
64static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
65{
66 return rq_qos_id(q, RQ_QOS_CGROUP);
67}
68
69static inline void rq_wait_init(struct rq_wait *rq_wait)
70{
71 atomic_set(&rq_wait->inflight, 0);
72 init_waitqueue_head(&rq_wait->wait);
73}
74
/*
 * Register @rqos on @q by pushing it onto the head of the queue's
 * singly linked policy list.  The two stores are order-dependent:
 * rqos->next must be set before the list head is repointed.
 * NOTE(review): no locking is visible here — presumably the caller
 * serializes against concurrent list walkers; confirm at call sites.
 */
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
}
80
81static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
82{
83 struct rq_qos *cur, *prev = NULL;
84 for (cur = q->rq_qos; cur; cur = cur->next) {
85 if (cur == rqos) {
86 if (prev)
87 prev->next = rqos->next;
88 else
89 q->rq_qos = cur;
90 break;
91 }
92 prev = cur;
93 }
94}
95
/*
 * Helpers for throttling on an rq_wait / scaling an rq_depth,
 * implemented in blk-rq-qos.c.
 */
/* Atomically increment rq_wait->inflight iff it is below @limit;
 * returns true on success. */
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
void rq_depth_scale_up(struct rq_depth *rqd);
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

/*
 * Dispatch wrappers: each walks q->rq_qos and invokes the matching
 * rq_qos_ops callback on every registered policy (see blk-rq-qos.c).
 */
void rq_qos_cleanup(struct request_queue *, struct bio *);
void rq_qos_done(struct request_queue *, struct request *);
void rq_qos_issue(struct request_queue *, struct request *);
void rq_qos_requeue(struct request_queue *, struct request *);
void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
/* Tear down and free every policy registered on the queue. */
void rq_qos_exit(struct request_queue *);
#endif