#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;

struct blk_mq_cpu_notifier {
	struct list_head list;
	void *data;
	void (*notify)(void *data, unsigned long action, unsigned int cpu);
};
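
/*
 * Illustrative sketch only, not part of the API: a driver-side callback
 * matching notify() above, invoked with a CPU hotplug action and the CPU
 * number. "struct my_driver" and my_drain_cpu() are hypothetical names.
 *
 *	static void my_cpu_notify(void *data, unsigned long action,
 *				  unsigned int cpu)
 *	{
 *		struct my_driver *drv = data;
 *
 *		if (action == CPU_DEAD)
 *			my_drain_cpu(drv, cpu);
 *	}
 */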

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	run_work;
	struct delayed_work	delay_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	unsigned int		queue_num;

	void			*driver_data;

	unsigned int		nr_ctx_map;
	unsigned long		*ctx_map;
	unsigned int		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	unsigned int		wait_index;

	struct blk_mq_tags	*tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		cmd_size;	/* per-request extra data */

	struct blk_mq_cpu_notifier	cpu_notifier;
	struct kobject		kobj;
};

struct blk_mq_tag_set {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;
};
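
/*
 * Minimal sketch of a tag set initialization; "my_mq_ops" and "struct
 * my_cmd" are hypothetical driver-side names. cmd_size reserves per-request
 * PDU space directly behind each struct request (see blk_mq_rq_to_pdu()
 * below).
 *
 *	static struct blk_mq_tag_set my_tag_set = {
 *		.ops		= &my_mq_ops,
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= 64,
 *		.cmd_size	= sizeof(struct my_cmd),
 *		.numa_node	= NUMA_NO_NODE,
 *		.flags		= BLK_MQ_F_SHOULD_MERGE,
 *	};
 */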

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
		unsigned int);
typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);

struct blk_mq_ops {
	/*
	 * Queue a new request from the block layer.
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map a software queue/CPU index to a specific hardware queue.
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout.
	 */
	rq_timed_out_fn		*timeout;

	/*
	 * Called to complete the request once the driver has signalled
	 * completion; runs from softirq/IPI context via
	 * blk_mq_complete_request().
	 */
	softirq_done_fn		*complete;

	/*
	 * Override for hctx allocations (should probably go)
	 */
	alloc_hctx_fn		*alloc_hctx;
	free_hctx_fn		*free_hctx;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
};

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_SHOULD_SORT	= 1 << 1,

	BLK_MQ_S_STOPPED	= 0,

	BLK_MQ_MAX_DEPTH	= 2048,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
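
/*
 * Illustrative only: a minimal ops table. The queue_rq handler and the
 * "my_*" helpers are hypothetical; blk_mq_map_queue() (declared below) is
 * the stock mapping helper.
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 *	{
 *		struct my_dev *dev = hctx->driver_data;
 *
 *		if (!my_hw_has_room(dev))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		my_issue(dev, rq);
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 */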

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
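
/*
 * Typical bring-up order (sketch, reusing the hypothetical my_tag_set from
 * above): allocate the tag set once, then create a queue from it. Error
 * handling is abbreviated.
 *
 *	ret = blk_mq_alloc_tag_set(&my_tag_set);
 *	if (ret)
 *		return ret;
 *	q = blk_mq_init_queue(&my_tag_set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&my_tag_set);
 *		return PTR_ERR(q);
 *	}
 */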

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
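
/*
 * Sketch: allocating a request outside the normal bio submission path,
 * e.g. for a driver-internal command; pairs with blk_mq_free_request().
 * Setup and issue of the request are elided.
 *
 *	struct request *rq = blk_mq_alloc_request(q, READ, GFP_KERNEL);
 *
 *	if (rq) {
 *		...
 *		blk_mq_free_request(rq);
 *	}
 */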

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);

void blk_mq_end_io(struct request *rq, int error);
void __blk_mq_end_io(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);

void blk_mq_complete_request(struct request *rq);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
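
/*
 * Common flow-control pattern (sketch): when the device runs out of
 * resources in ->queue_rq(), stop the hardware queue and ask the block
 * layer to retry; restart the stopped queues from the completion path once
 * space frees up.
 *
 *	In ->queue_rq():
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_MQ_RQ_QUEUE_BUSY;
 *
 *	In the completion handler:
 *		blk_mq_start_stopped_hw_queues(q, true);
 */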

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, or add it to get from a request
 * to its PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return (void *) rq + sizeof(*rq);
}
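
/*
 * Example round trip ("struct my_cmd" is the hypothetical PDU type sized
 * via blk_mq_tag_set.cmd_size):
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */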

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
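
/*
 * Usage sketch: iterate all hardware queues of a request_queue, e.g. to
 * restart them.
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	unsigned int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		blk_mq_start_hw_queue(hctx);
 */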

#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})
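
/*
 * Usage sketch: "sum" is evaluated once per software context, with __x
 * pointing at the current blk_mq_ctx (whose fields live in the private
 * block/blk-mq.h; the field below is only an illustration):
 *
 *	unsigned int merged = blk_ctx_sum(q, __x->rq_merged);
 */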

#endif