#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

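/*
 * Glue between blk-mq and an (optional) attached I/O scheduler. A blk-mq
 * elevator fills in elevator_type->ops.mq, and the wrappers in this header
 * invoke whichever hooks are set. A minimal sketch of the wiring, with
 * hypothetical my_* callbacks (registration as done by e.g. mq-deadline):
 *
 *	static struct elevator_type my_sched = {
 *		.ops.mq = {
 *			.allow_merge	= my_allow_merge,
 *			.has_work	= my_has_work,
 *		},
 *		.uses_mq	= true,
 *		.elevator_name	= "my-sched",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 * followed by elv_register(&my_sched) from the scheduler's module init.
 */

/*
 * Free scheduler-private per-hctx data, invoking @exit on each hardware
 * queue that has any.
 */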
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *));

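/* Allocate and release requests through the attached I/O scheduler. */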
struct request *blk_mq_sched_get_request(struct request_queue *q,
					 struct bio *bio, unsigned int op,
					 struct blk_mq_alloc_data *data);
void blk_mq_sched_put_request(struct request *rq);

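/* Merge handling and queue-restart helpers, implemented in blk-mq-sched.c. */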
void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

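/* Insert a single request, or a list of requests from one software queue. */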
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block);
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async);

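/* Main dispatch loop: feed scheduler (or sw queue) requests to the driver. */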
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

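/* Attach/detach an elevator to/from a blk-mq request queue. */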
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);

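/* Per-hardware-queue scheduler state, set up and torn down with the hctx. */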
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx);
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx);

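/* Set up the default I/O scheduler when the queue is initialized. */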
int blk_mq_sched_init(struct request_queue *q);

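/*
 * Attempt to merge @bio into an existing request. The cheap checks (no
 * elevator attached, merging disabled, bio not mergeable) are done inline;
 * only then do we call into the scheduler proper.
 */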
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
		return false;

	return __blk_mq_sched_bio_merge(q, bio);
}

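/*
 * Let the scheduler attach private data to @rq before it is used; returns
 * 0 when no elevator (or no get_rq_priv hook) is present.
 */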
static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
					   struct request *rq,
					   struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.get_rq_priv)
		return e->type->ops.mq.get_rq_priv(q, rq, bio);

	return 0;
}

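/* Release scheduler-private data attached by blk_mq_sched_get_rq_priv(). */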
static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
					    struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.put_rq_priv)
		e->type->ops.mq.put_rq_priv(q, rq);
}

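/*
 * Ask the scheduler whether @bio may be merged into @rq; merging is
 * allowed by default when no elevator (or no allow_merge hook) is present.
 */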
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(q, rq, bio);

	return true;
}

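/* Notify the scheduler that @rq has completed. */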
static inline void blk_mq_sched_completed_request(struct request *rq)
{
	struct elevator_queue *e = rq->q->elevator;

	if (e && e->type->ops.mq.completed_request)
		e->type->ops.mq.completed_request(rq);
}

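/* Notify the scheduler that the driver has started executing @rq. */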
static inline void blk_mq_sched_started_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.started_request)
		e->type->ops.mq.started_request(rq);
}

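/* Notify the scheduler that @rq is being requeued. */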
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.mq.requeue_request)
		e->type->ops.mq.requeue_request(rq);
}

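/* True if the scheduler still has requests queued for @hctx. */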
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.mq.has_work)
		return e->type->ops.mq.has_work(hctx);

	return false;
}

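/*
 * Restart handling: dispatch paths mark BLK_MQ_S_SCHED_RESTART when they
 * stop making progress (e.g. when driver tags run out); blk_mq_sched_restart()
 * later reruns queues whose bit is set, clearing it in the process.
 */
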
/*
 * Mark a hardware queue as needing a restart.
 */
static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

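/* Check, without clearing, whether a restart is pending for @hctx. */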
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif