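/*
 * blk-mq scheduling framework: hooks connecting the blk-mq core to
 * elevator (I/O scheduler) callbacks.
 */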
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
                                int (*init)(struct blk_mq_hw_ctx *),
                                void (*exit)(struct blk_mq_hw_ctx *));

void blk_mq_sched_free_hctx_data(struct request_queue *q,
                                 void (*exit)(struct blk_mq_hw_ctx *));

struct request *blk_mq_sched_get_request(struct request_queue *q,
                                         struct bio *bio, unsigned int op,
                                         struct blk_mq_alloc_data *data);
void blk_mq_sched_put_request(struct request *rq);

void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
                                   struct list_head *rq_list,
                                   struct request *(*get_rq)(struct blk_mq_hw_ctx *));

int blk_mq_sched_setup(struct request_queue *q);
void blk_mq_sched_teardown(struct request_queue *q);

int blk_mq_sched_init(struct request_queue *q);

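/*
 * Try to merge @bio into an already-queued request. Bails out early if
 * there is no elevator, the queue has merging disabled, or the bio is
 * not mergeable.
 */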
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
                return false;

        return __blk_mq_sched_bio_merge(q, bio);
}

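/*
 * Let the elevator attach per-request private data to @rq. Returns 0 when
 * no elevator is attached or it does not implement the hook.
 */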
static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
                                           struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.get_rq_priv)
                return e->type->ops.mq.get_rq_priv(q, rq);

        return 0;
}

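/* Release elevator-private data attached to @rq by ->get_rq_priv(). */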
static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
                                            struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.put_rq_priv)
                e->type->ops.mq.put_rq_priv(q, rq);
}

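/*
 * Insert @rq for dispatch. An elevator implementing ->insert_requests()
 * gets the request on a one-entry list; otherwise the request goes
 * straight into the software queue under the ctx lock. The hardware
 * queue is then run if @run_queue is set.
 */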
static inline void
blk_mq_sched_insert_request(struct request *rq, bool at_head, bool run_queue,
                            bool async)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

        if (e && e->type->ops.mq.insert_requests) {
                LIST_HEAD(list);

                list_add(&rq->queuelist, &list);
                e->type->ops.mq.insert_requests(hctx, &list, at_head);
        } else {
                spin_lock(&ctx->lock);
                __blk_mq_insert_request(hctx, rq, at_head);
                spin_unlock(&ctx->lock);
        }

        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
}

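/*
 * Insert a list of requests, through the elevator if one is attached,
 * else directly into the software queue, then run the hardware queue.
 */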
static inline void
blk_mq_sched_insert_requests(struct request_queue *q, struct blk_mq_ctx *ctx,
                             struct list_head *list, bool run_queue_async)
{
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.insert_requests)
                e->type->ops.mq.insert_requests(hctx, list, false);
        else
                blk_mq_insert_requests(hctx, ctx, list);

        blk_mq_run_hw_queue(hctx, run_queue_async);
}

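/*
 * Ask the elevator whether @bio may be merged with @rq. Merging is
 * allowed by default when no elevator (or no hook) is present.
 */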
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
                         struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.allow_merge)
                return e->type->ops.mq.allow_merge(q, rq, bio);

        return true;
}

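/*
 * Notify the elevator of a completed request, then return the request's
 * scheduler tag to the sched_tags set.
 */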
static inline void
blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.completed_request)
                e->type->ops.mq.completed_request(hctx, rq);

        BUG_ON(rq->internal_tag == -1);

        blk_mq_put_tag(hctx, hctx->sched_tags, rq->mq_ctx, rq->internal_tag);
}

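/* Notify the elevator that the driver started executing @rq. */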
static inline void blk_mq_sched_started_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.started_request)
                e->type->ops.mq.started_request(rq);
}

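/* Notify the elevator that @rq is being requeued. */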
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.mq.requeue_request)
                e->type->ops.mq.requeue_request(rq);
}

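/* True if the elevator has requests pending for this hardware queue. */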
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.mq.has_work)
                return e->type->ops.mq.has_work(hctx);

        return false;
}

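/*
 * Mark the hardware queue for a restart once requests complete. With a
 * shared tag set, the queue itself is marked as well so that other
 * queues sharing the tags also get a chance to run again.
 */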
static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
                set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
                if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
                        struct request_queue *q = hctx->queue;

                        if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
                                set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
                }
        }
}

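/* True if a scheduler restart has been marked for this hardware queue. */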
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif