#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"
#include "blk-mq-tag.h"

struct blk_mq_tag_set;

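/*
 * Per-CPU software queue context. Incoming requests are staged on rq_list
 * under lock until they are pulled over to a hardware queue context
 * (blk_mq_hw_ctx) for dispatch; index_hw is this ctx's slot in hctx->ctxs.
 */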
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

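/*
 * Return the hardware queue that the given CPU is mapped to, as set up in
 * q->mq_map by the tag set's CPU-to-queue mapping.
 */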
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

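/*
 * Look up the software queue context for a specific CPU.
 */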
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

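/*
 * Bundle of parameters passed through the request allocation path. flags
 * carries the BLK_MQ_REQ_* allocation flags; shallow_depth, when non-zero,
 * limits how deep into the tag sbitmap an allocation may go (used by I/O
 * schedulers to throttle async requests). ctx/hctx are filled in as the
 * allocation picks a software and hardware queue.
 */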
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;
	unsigned int shallow_depth;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

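/*
 * Pick the tags a request allocation draws from: scheduler (internal) tags
 * for BLK_MQ_REQ_INTERNAL allocations, driver tags otherwise.
 */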
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

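/*
 * True if the hardware queue has been stopped, e.g. via blk_mq_stop_hw_queue().
 */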
static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

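/*
 * Account requests that are currently in flight for the given partition;
 * the counts are reported through inflight[].
 */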
void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
			unsigned int inflight[2]);

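/*
 * Dispatch budget: drivers that need to reserve per-device resources before
 * a request is dispatched (e.g. SCSI) implement ->get_budget/->put_budget.
 * When ->get_budget returns false, the dispatch path stops issuing requests
 * for now; without the hooks, budget is always granted.
 */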
static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->put_budget)
		q->mq_ops->put_budget(hctx);
}

static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	if (q->mq_ops->get_budget)
		return q->mq_ops->get_budget(hctx);
	return true;
}

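/*
 * Return the driver tag to the hardware queue's tag set. If the request was
 * counted against hctx->nr_active for shared-tag fairness (RQF_MQ_INFLIGHT),
 * drop that count as well.
 */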
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

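/*
 * The two variants below release the driver tag early, but only for requests
 * that also hold a scheduler tag (internal_tag != -1); otherwise rq->tag is
 * the request's only tag and is freed when the request completes.
 */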
static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
					      struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static inline void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

#endif