blob: 98caba3e962eeb63bd9bbdc78ce0398d6fa414ca [file] [log] [blame]
Josef Bacika7905042018-07-03 09:32:35 -06001#ifndef RQ_QOS_H
2#define RQ_QOS_H
3
4#include <linux/kernel.h>
5#include <linux/blkdev.h>
6#include <linux/blk_types.h>
7#include <linux/atomic.h>
8#include <linux/wait.h>
9
/* Identifies which QoS policy a struct rq_qos instance implements. */
enum rq_qos_id {
	RQ_QOS_WBT,	/* writeback throttling (see wbt_rq_qos()) */
	RQ_QOS_CGROUP,	/* cgroup-based I/O control (see blkcg_rq_qos()) */
};
14
/*
 * A wait queue paired with an in-flight counter.  Submitters sleep on
 * @wait until @inflight is below a limit (see rq_wait_inc_below()).
 */
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;	/* number of currently outstanding requests */
};
19
/*
 * One QoS policy instance attached to a request queue.  Instances form a
 * singly linked list headed at q->rq_qos (see rq_qos_add()/rq_qos_del()).
 */
struct rq_qos {
	struct rq_qos_ops *ops;		/* policy callbacks */
	struct request_queue *q;	/* owning queue */
	enum rq_qos_id id;		/* which policy this instance is */
	struct rq_qos *next;		/* next policy on the queue's list */
};
26
/*
 * Callbacks a QoS policy implements.  The rq_qos_* functions declared at
 * the bottom of this header are the per-queue entry points; presumably
 * they dispatch to these per-policy hooks — confirm in the .c file.
 */
struct rq_qos_ops {
	/* called at bio submission; name suggests it may block the
	 * submitter until the bio is allowed through — TODO confirm */
	void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
	/* associate a bio with the request servicing it */
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	/* unwind throttle accounting for a bio that is being discarded */
	void (*cleanup)(struct rq_qos *, struct bio *);
	/* tear down the policy instance */
	void (*exit)(struct rq_qos *);
};
37
/*
 * Queue-depth scaling state for policies that throttle by capping the
 * number of in-flight requests (see rq_depth_scale_up()/_down() and
 * rq_depth_calc_max_depth()).
 */
struct rq_depth {
	unsigned int max_depth;		/* current computed depth cap */

	int scale_step;			/* scaling position; sign convention
					 * set by rq_depth_scale_* — TODO
					 * confirm against blk-rq-qos.c */
	bool scaled_max;

	unsigned int queue_depth;	/* depth reported by the device */
	unsigned int default_depth;
};
47
48static inline struct rq_qos *rq_qos_id(struct request_queue *q,
49 enum rq_qos_id id)
50{
51 struct rq_qos *rqos;
52 for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
53 if (rqos->id == id)
54 break;
55 }
56 return rqos;
57}
58
59static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
60{
61 return rq_qos_id(q, RQ_QOS_WBT);
62}
63
64static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
65{
66 return rq_qos_id(q, RQ_QOS_CGROUP);
67}
68
69static inline void rq_wait_init(struct rq_wait *rq_wait)
70{
71 atomic_set(&rq_wait->inflight, 0);
72 init_waitqueue_head(&rq_wait->wait);
73}
74
75static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
76{
77 rqos->next = q->rq_qos;
78 q->rq_qos = rqos;
79}
80
81static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
82{
Tejun Heo05444112019-10-15 08:49:27 -070083 struct rq_qos **cur;
84
85 for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
86 if (*cur == rqos) {
87 *cur = rqos->next;
Josef Bacika7905042018-07-03 09:32:35 -060088 break;
89 }
Josef Bacika7905042018-07-03 09:32:35 -060090 }
91}
92
/* rq_wait / rq_depth helpers, defined out of line. */
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

/*
 * Per-queue QoS entry points, one per rq_qos_ops callback; presumably
 * each walks q->rq_qos invoking the policies' hooks — see the .c file.
 */
void rq_qos_cleanup(struct request_queue *, struct bio *);
void rq_qos_done(struct request_queue *, struct request *);
void rq_qos_issue(struct request_queue *, struct request *);
void rq_qos_requeue(struct request_queue *, struct request *);
void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
void rq_qos_exit(struct request_queue *);
#endif
106#endif