/* blob: ed0035cd458ee8f78691a8f95415a8665707ca11 [file] [log] [blame] */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

/*
 * Software queue context for blk-mq.  Requests are staged on rq_list
 * before being handed to a hardware dispatch context.
 * NOTE(review): the 'cpu' field and the CPU -> queue mapping helpers
 * below suggest one instance per CPU per request queue — confirm
 * against the blk-mq core that allocates these.
 */
struct blk_mq_ctx {
	/* Hot dispatch-side fields, isolated on their own cacheline on SMP. */
	struct {
		spinlock_t		lock;		/* protects rq_list */
		struct list_head	rq_list;	/* staged requests */
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;		/* CPU this context belongs to */
	unsigned int		index_hw;	/* index within the hw context — presumably its ctx map; verify */
	unsigned int		ipi_redirect;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];	/* two counters; index meaning not visible here — likely sync/async, confirm */
	unsigned long		rq_merged;

	/* incremented at completion time; own cacheline to avoid bouncing with dispatch-side counters */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;		/* owning request queue */
	struct kobject		kobj;		/* sysfs representation of this context */
};
24
/*
 * Internal blk-mq entry points shared between the blk-mq core files;
 * not part of the public block-driver API.
 */
void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);
Jens Axboe320ae512013-10-24 09:20:05 +010032
/*
 * CPU hotplug helpers
 */
struct blk_mq_cpu_notifier;
/* fn's arguments mirror a CPU notifier callback: (data, action, cpu). */
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
			      void (*fn)(void *, unsigned long, unsigned int),
			      void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
Jens Axboe320ae512013-10-24 09:20:05 +010043
/*
 * CPU -> queue mappings
 */
struct blk_mq_reg;
/* Returns an allocated map indexed by CPU; caller owns it — TODO confirm ownership against callers. */
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);

void blk_mq_add_timer(struct request *rq);

#endif