#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

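/*
 * Per-CPU software staging queue. Requests are queued on rq_list first
 * and later dispatched to the hardware queue this ctx is mapped to.
 */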
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];
	struct blk_rq_stat	stat[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

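/*
 * Queue management
 */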
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
 * CPU hotplug helpers
 */
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);

/*
 * CPU -> queue mappings
 */
int blk_mq_map_queues(struct blk_mq_tag_set *set);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

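/*
 * Return the hardware queue that requests submitted from @cpu are
 * dispatched to, as determined by the q->mq_map CPU-to-queue table.
 */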
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
		int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

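/* Request timeout handling; calls into the driver's ->timeout() handler. */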
extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

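/* Release blk-mq resources when the request queue itself is torn down. */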
void blk_mq_release(struct request_queue *q);

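/*
 * Return the software queue context for @cpu. The caller must pass a
 * valid CPU number; no preemption protection is taken here (see
 * blk_mq_get_ctx() below).
 */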
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

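/* Pairs with blk_mq_get_ctx(); re-enables preemption via put_cpu(). */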
static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

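/*
 * State passed around by the request allocation paths: the queue and the
 * allocation flags go in; ctx and hctx may be supplied by the caller or
 * filled in during allocation.
 */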
struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	unsigned int flags;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

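/* Initialize an allocation context before a request allocation attempt. */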
static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, unsigned int flags,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->flags = flags;
	data->ctx = ctx;
	data->hctx = hctx;
}

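/* True if the hardware queue has been stopped and must not be run. */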
static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

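/*
 * True if the hardware queue has software queues mapped to it and tags
 * allocated; unmapped hardware queues should not be run.
 */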
static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif