#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

/*
 * Per-CPU software queue context for the multiqueue block layer.
 * One of these exists per CPU; requests are staged on rq_list before
 * being handed to a hardware queue (blk_mq_hw_ctx).
 */
struct blk_mq_ctx {
	/* Hot submission-side state, isolated on its own cache line. */
	struct {
		spinlock_t lock;		/* protects rq_list */
		struct list_head rq_list;	/* requests queued on this ctx */
	} ____cacheline_aligned_in_smp;

	unsigned int cpu;		/* CPU this software queue belongs to */
	unsigned int index_hw;		/* index within the mapped hardware queue */
	unsigned int ipi_redirect;	/* NOTE(review): presumably enables IPI-based
					 * completion redirection to the submitting
					 * CPU — confirm against blk-mq core */

	/* incremented at dispatch time */
	unsigned long rq_dispatched[2];	/* assumed indexed by sync/async — TODO confirm */
	unsigned long rq_merged;	/* requests merged on this ctx */

	/* incremented at completion time */
	/* Completion counters on a separate cache line from the submission side. */
	unsigned long ____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue *queue;	/* owning request queue */
	struct kobject kobj;		/* sysfs representation of this ctx */
};
| 24 | |
/*
 * Internal blk-mq entry points (implemented in blk-mq core / flush code).
 */
void __blk_mq_complete_request(struct request *rq);	/* low-level completion path */
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);	/* run (or kick async) one hw queue */
void blk_mq_init_flush(struct request_queue *q);	/* set up flush machinery for a queue */
void blk_mq_drain_queue(struct request_queue *q);	/* wait out in-flight requests */
void blk_mq_free_queue(struct request_queue *q);	/* tear down a blk-mq request queue */
void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);	/* (re)initialize a request for reuse */
Jens Axboe | 320ae51 | 2013-10-24 09:20:05 +0100 | [diff] [blame] | 31 | |
| 32 | /* |
| 33 | * CPU hotplug helpers |
| 34 | */ |
| 35 | struct blk_mq_cpu_notifier; |
| 36 | void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, |
| 37 | void (*fn)(void *, unsigned long, unsigned int), |
| 38 | void *data); |
| 39 | void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier); |
| 40 | void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier); |
| 41 | void blk_mq_cpu_init(void); |
Jens Axboe | 676141e | 2014-03-20 13:29:18 -0600 | [diff] [blame] | 42 | void blk_mq_enable_hotplug(void); |
| 43 | void blk_mq_disable_hotplug(void); |
Jens Axboe | 320ae51 | 2013-10-24 09:20:05 +0100 | [diff] [blame] | 44 | |
| 45 | /* |
| 46 | * CPU -> queue mappings |
| 47 | */ |
| 48 | struct blk_mq_reg; |
| 49 | extern unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg); |
| 50 | extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); |
| 51 | |
| 52 | void blk_mq_add_timer(struct request *rq); |
| 53 | |
| 54 | #endif |