/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

/**
 * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

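/*
 * A minimal usage sketch (not part of this header) of how the allocation
 * helpers above are expected to pair up: allocate the tag map, then the
 * requests backing it, and tear both down in reverse order. "depth" and
 * "hctx_idx" below are placeholder variables.
 *
 *	tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
 *	if (!tags)
 *		return -ENOMEM;
 *	if (blk_mq_alloc_rqs(set, tags, hctx_idx, depth) < 0) {
 *		blk_mq_free_rq_map(tags);
 *		return -ENOMEM;
 *	}
 *	...
 *	blk_mq_free_rqs(set, tags, hctx_idx);
 *	blk_mq_free_rq_map(tags);
 */
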
/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

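/*
 * Illustrative sketch (an assumption about typical callers, not taken from
 * this file): blk_mq_request_bypass_insert() queues the request on the hctx
 * dispatch list, bypassing the I/O scheduler and the per-cpu software queue,
 * and run_queue says whether to kick the hardware queue right away.
 *
 *	blk_mq_request_bypass_insert(rq, true);
 */
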
/* Used by blk_insert_cloned_request() to issue request directly */
blk_status_t blk_mq_request_issue_directly(struct request *rq);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

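/*
 * Illustrative sketch: callers that only hold a request typically derive
 * its hardware queue from the CPU recorded in the request's software queue
 * context, as the driver-tag helpers further down in this header do:
 *
 *	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
 */
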
/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

void blk_mq_release(struct request_queue *q);

/**
 * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
 * @rq: target request.
 */
static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
{
	return READ_ONCE(rq->state);
}

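/*
 * Minimal sketch of use, assuming the MQ_RQ_* values from
 * include/linux/blk-mq.h: a started but not yet completed request reads
 * back as MQ_RQ_IN_FLIGHT.
 *
 *	in_flight = blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT;
 */
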
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

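/*
 * Pairing note (hedged, based on the get_cpu()/put_cpu() calls above):
 * blk_mq_get_ctx() disables preemption, so the returned ctx should only be
 * used for a short, non-sleeping section and then released with
 * blk_mq_put_ctx():
 *
 *	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
 *
 *	... non-sleeping work against ctx ...
 *
 *	blk_mq_put_ctx(ctx);
 */
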
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	blk_mq_req_flags_t flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

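/*
 * Note on the helper below: BLK_MQ_REQ_INTERNAL marks an allocation that is
 * backed by the I/O scheduler's tag space, so it resolves to
 * hctx->sched_tags rather than the driver-owned hctx->tags.
 */
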
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}

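/*
 * Illustrative sketch (not taken from this file) of how dispatch code is
 * expected to pair the budget helpers above: take a budget before pulling a
 * request and hand it back if nothing ends up being dispatched.
 *
 *	if (!blk_mq_get_dispatch_budget(hctx))
 *		break;
 *	rq = blk_mq_dequeue_from_ctx(hctx, ctx);
 *	if (!rq) {
 *		blk_mq_put_dispatch_budget(hctx);
 *		break;
 *	}
 */
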
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

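/*
 * The two wrappers below only release the driver tag when the request holds
 * both a driver tag (rq->tag != -1) and a scheduler tag (rq->internal_tag !=
 * -1); without a scheduler tag the driver tag is the request's only tag and
 * is expected to be freed by the normal completion path instead.
 */
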
static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
{
	int cpu;

	for_each_possible_cpu(cpu)
		set->mq_map[cpu] = 0;
}

#endif