#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	unsigned int		last_tag ____cacheline_aligned_in_smp;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;
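
/*
 * Usage sketch (illustrative only; the real insertion path lives in
 * blk-mq.c). The anonymous struct keeps the lock next to the list it
 * protects, so queueing a request touches a single cacheline:
 *
 *	spin_lock(&ctx->lock);
 *	list_add_tail(&rq->queuelist, &ctx->rq_list);
 *	spin_unlock(&ctx->lock);
 */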

void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);

/*
 * CPU hotplug helpers
 */
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      int (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
void blk_mq_enable_hotplug(void);
void blk_mq_disable_hotplug(void);
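
/*
 * Registration sketch, modelled on the hctx init path in blk-mq.c
 * (illustrative; blk_mq_hctx_notify is the callback used there):
 *
 *	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
 *				 blk_mq_hctx_notify, hctx);
 *	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
 *
 * with a matching blk_mq_unregister_cpu_notifier() on teardown.
 * blk_mq_disable_hotplug()/blk_mq_enable_hotplug() bracket sections that
 * must not race with CPU hotplug.
 */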
Jens Axboe320ae512013-10-24 09:20:05 +010047
48/*
49 * CPU -> queue mappings
50 */
Christoph Hellwig24d2f902014-04-15 14:14:00 -060051extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
Akinobu Mita57783222015-09-27 02:09:23 +090052extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
53 const struct cpumask *online_mask);
Jens Axboef14bbe72014-05-27 12:06:53 -060054extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
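
/*
 * Mapping sketch, modelled on queue init in blk-mq.c (illustrative): the
 * map is built once from the tag set, then consulted to find the home
 * node of each hardware queue:
 *
 *	q->mq_map = blk_mq_make_queue_map(set);
 *	...
 *	node = blk_mq_hw_queue_to_node(q->mq_map, i);
 *
 * blk_mq_update_queue_map() rewrites the map when the online CPU mask
 * changes.
 */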

/*
 * sysfs helpers
 */
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

/*
 * Basic implementation of a sparser bitmap, allowing the user to spread
 * the bits over more cachelines.
 */
struct blk_align_bitmap {
	unsigned long word;
	unsigned long depth;
} ____cacheline_aligned_in_smp;
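
/*
 * Worked example (illustrative, not from the original source): a tag space
 * of 256 bits could be held in four blk_align_bitmap entries, each with up
 * to BITS_PER_LONG live bits in 'word' and the live count in 'depth'. The
 * per-entry alignment gives every word its own cacheline, so locating a
 * tag is roughly:
 *
 *	index = tag / BITS_PER_LONG;	(which aligned entry)
 *	bit   = tag % BITS_PER_LONG;	(bit within bitmap[index].word)
 *
 * and concurrent test_and_set_bit() traffic spreads across cachelines
 * instead of bouncing one densely packed bitmap between CPUs.
 */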

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues; they could be per-node as well,
 * for instance, but for now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctxs are persistent. This does
 * mean that we can't rely on the ctx always matching the currently running
 * CPU.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
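
/*
 * Usage note (illustrative): blk_mq_get_ctx() and blk_mq_put_ctx() must be
 * paired, since get_cpu() disables preemption and put_cpu() re-enables it:
 *
 *	ctx = blk_mq_get_ctx(q);
 *	...	(ctx is stable here, preemption is off)
 *	blk_mq_put_ctx(ctx);
 */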

struct blk_mq_alloc_data {
	/* input parameter */
	struct request_queue *q;
	gfp_t gfp;
	bool reserved;

	/* input & output parameter */
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
};

static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
		struct request_queue *q, gfp_t gfp, bool reserved,
		struct blk_mq_ctx *ctx, struct blk_mq_hw_ctx *hctx)
{
	data->q = q;
	data->gfp = gfp;
	data->reserved = reserved;
	data->ctx = ctx;
	data->hctx = hctx;
}
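
/*
 * Allocation sketch, modelled on blk_mq_map_request() in blk-mq.c
 * (illustrative; __blk_mq_alloc_request() is defined there, not here):
 *
 *	struct blk_mq_alloc_data alloc_data;
 *
 *	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx, hctx);
 *	rq = __blk_mq_alloc_request(&alloc_data, rw);
 *
 * On return, alloc_data.ctx and alloc_data.hctx hold the context the
 * request was actually allocated from, which may differ from the input.
 */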

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

#endif