blob: 72beba1f9d55efa827e8667535a7f2956dd2b924 [file] [log] [blame]
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

/*
 * Per-CPU software queue context for blk-mq.  One of these exists per
 * CPU per request_queue; submitted requests are staged on rq_list before
 * being handed to a hardware context (blk_mq_hw_ctx) for dispatch.
 */
struct blk_mq_ctx {
	struct {
		spinlock_t		lock;		/* protects rq_list */
		struct list_head	rq_list;	/* staged requests, not yet dispatched */
	} ____cacheline_aligned_in_smp;		/* keep lock+list on their own cacheline */

	unsigned int		cpu;		/* CPU this software queue belongs to */
	unsigned int		index_hw;	/* index of this ctx within its hw queue — TODO confirm against mapping code */
	unsigned int		ipi_redirect;	/* NOTE(review): presumably "complete via IPI on submitting CPU" flag — verify at use sites */

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];	/* two counters; likely split by sync/async — confirm at callers */
	unsigned long		rq_merged;

	/* incremented at completion time; own cacheline to avoid bouncing
	 * with the dispatch-side counters above */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;		/* owning request queue */
	struct kobject		kobj;		/* sysfs representation of this ctx */
};
24
/*
 * Internal blk-mq core entry points (defined in blk-mq.c and friends).
 */
void __blk_mq_complete_request(struct request *rq);	/* low-level completion, bypasses the public wrapper */
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);	/* kick dispatch; async defers to a worker — confirm */
void blk_mq_init_flush(struct request_queue *q);	/* set up flush/FUA machinery for q */
void blk_mq_drain_queue(struct request_queue *q);	/* wait for in-flight requests to finish */
void blk_mq_free_queue(struct request_queue *q);	/* tear down a blk-mq request queue */
void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);	/* (re)initialize a request for hctx */
Jens Axboe320ae512013-10-24 09:20:05 +010031
/*
 * CPU hotplug helpers: per-owner notifier objects that get a callback
 * (fn) with (data, hotplug action, cpu) on CPU state transitions.
 */
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      void (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);	/* one-time init of the blk-mq hotplug handling */
Jens Axboe320ae512013-10-24 09:20:05 +010042
/*
 * CPU -> queue mappings: build/refresh the per-CPU array that maps each
 * possible CPU to a hardware queue index.
 */
struct blk_mq_reg;
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg);	/* allocates the map; caller owns it — confirm who frees */
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);	/* fill map in place; returns 0 on success — verify */

void blk_mq_add_timer(struct request *rq);	/* arm the request timeout timer */

#endif