#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

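/*
 * Per-CPU software queue context. Requests are staged on ->rq_list before
 * being handed to the hardware queue this ctx maps to.
 */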
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
	struct blk_rq_stat	stat[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
				bool wait);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

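/*
 * Return the hardware queue that the given CPU's software queue maps to.
 */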
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

/*
 * debugfs helpers
 */
#ifdef CONFIG_BLK_DEBUG_FS
int blk_mq_debugfs_register(struct request_queue *q, const char *name);
void blk_mq_debugfs_unregister(struct request_queue *q);
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
#else
static inline int blk_mq_debugfs_register(struct request_queue *q,
					  const char *name)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}

static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	return 0;
}

static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}
#endif

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

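/*
 * Return the software queue context for @cpu.
 */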
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

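/*
 * Pairs with blk_mq_get_ctx(): drops the CPU reference taken via get_cpu().
 */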
static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

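/*
 * Parameters and intermediate state for a single request allocation: the
 * queue and flags are inputs, the ctx/hctx pair is supplied or filled in
 * along the way.
 */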
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

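/*
 * Internal (scheduler) allocations use the hctx's sched_tags; everything
 * else uses the driver tags.
 */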
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}

/*
 * Internal helpers for request allocation/init/free
 */
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct request *rq);
void blk_mq_finish_request(struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
					unsigned int op);

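/*
 * True if the hardware queue is currently stopped and must not be run.
 */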
static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

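/*
 * True if the hardware queue has at least one software queue mapped to it
 * and has tags allocated, i.e. it can actually service requests.
 */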
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif