#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;

struct blk_mq_cpu_notifier {
        struct list_head list;
        void *data;
        void (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_hw_ctx {
        struct {
                spinlock_t lock;
                struct list_head dispatch;
        } ____cacheline_aligned_in_smp;

        unsigned long state;            /* BLK_MQ_S_* flags */
        struct delayed_work delayed_work;

        unsigned long flags;            /* BLK_MQ_F_* flags */

        struct request_queue *queue;
        unsigned int queue_num;

        void *driver_data;

        unsigned int nr_ctx;
        struct blk_mq_ctx **ctxs;
        unsigned int nr_ctx_map;
        unsigned long *ctx_map;

        struct request **rqs;
        struct list_head page_list;
        struct blk_mq_tags *tags;

        unsigned long queued;
        unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 10
        unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

        unsigned int queue_depth;
        unsigned int numa_node;
        unsigned int cmd_size;          /* per-request extra data */

        struct blk_mq_cpu_notifier cpu_notifier;
        struct kobject kobj;
};

struct blk_mq_reg {
        struct blk_mq_ops *ops;
        unsigned int nr_hw_queues;
        unsigned int queue_depth;
        unsigned int reserved_tags;
        unsigned int cmd_size;          /* per-request extra data */
        int numa_node;
        unsigned int timeout;
        unsigned int flags;             /* BLK_MQ_F_* */
};
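
/*
 * Illustrative sketch, not part of this header: a driver would typically
 * fill in a blk_mq_reg and pass it to blk_mq_init_queue() (declared below)
 * together with its private data, which is later handed to ->init_hctx().
 * The example_* names and struct example_cmd are hypothetical.
 */
#if 0
static struct request_queue *example_init_queue(struct example_dev *dev)
{
        struct blk_mq_reg reg = {
                .ops            = &example_mq_ops,      /* see blk_mq_ops below */
                .nr_hw_queues   = 1,                    /* one hardware queue */
                .queue_depth    = 64,                   /* tags per hw queue */
                .cmd_size       = sizeof(struct example_cmd), /* per-request PDU */
                .numa_node      = NUMA_NO_NODE,
                .flags          = BLK_MQ_F_SHOULD_MERGE,
        };

        return blk_mq_init_queue(&reg, dev);
}
#endif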

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *, unsigned int);
typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);

struct blk_mq_ops {
        /*
         * Queue request
         */
        queue_rq_fn *queue_rq;

        /*
         * Map to specific hardware queue
         */
        map_queue_fn *map_queue;

        /*
         * Called on request timeout
         */
        rq_timed_out_fn *timeout;

        /*
         * Called to complete the request; invoked via
         * blk_mq_complete_request()
         */
        softirq_done_fn *complete;

        /*
         * Override for hctx allocations (should probably go)
         */
        alloc_hctx_fn *alloc_hctx;
        free_hctx_fn *free_hctx;

        /*
         * Called when the block layer side of a hardware queue has been
         * set up, allowing the driver to allocate/init matching structures.
         * Ditto for exit/teardown.
         */
        init_hctx_fn *init_hctx;
        exit_hctx_fn *exit_hctx;
};
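
/*
 * Illustrative sketch, not part of this header: a minimal blk_mq_ops.
 * blk_mq_map_queue() (declared below) is the stock single-queue mapping;
 * example_issue() is a hypothetical driver function that hands a request
 * to hardware and returns nonzero when the device is out of resources.
 */
#if 0
static int example_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        if (example_issue(hctx->driver_data, rq)) {
                /* device full: stop the queue, restart it on completion */
                blk_mq_stop_hw_queue(hctx);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops example_mq_ops = {
        .queue_rq       = example_queue_rq,
        .map_queue      = blk_mq_map_queue,     /* default ctx->hctx mapping */
};
#endif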

enum {
        BLK_MQ_RQ_QUEUE_OK      = 0,    /* queued fine */
        BLK_MQ_RQ_QUEUE_BUSY    = 1,    /* requeue IO for later */
        BLK_MQ_RQ_QUEUE_ERROR   = 2,    /* end IO with error */

        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
        BLK_MQ_F_SHOULD_SORT    = 1 << 1,
        BLK_MQ_F_SHOULD_IPI     = 1 << 2,

        BLK_MQ_S_STOPPED        = 0,

        BLK_MQ_MAX_DEPTH        = 2048,
};

struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
int blk_mq_init_commands(struct request_queue *, int (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
void blk_mq_free_commands(struct request_queue *, void (*free)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
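
/*
 * Illustrative sketch, not part of this header: per-request setup through
 * blk_mq_init_commands(). The init callback runs once for each preallocated
 * request; struct example_cmd is the hypothetical per-request PDU sized by
 * blk_mq_reg.cmd_size, reached via blk_mq_rq_to_pdu() (declared below).
 */
#if 0
static int example_init_cmd(void *data, struct blk_mq_hw_ctx *hctx,
                            struct request *rq, unsigned int nr)
{
        struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

        cmd->dev = data;        /* stash the device for completion time */
        return 0;
}

/* after blk_mq_init_queue() has returned a queue q: */
/*      blk_mq_init_commands(q, example_init_cmd, dev); */
#endif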

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);

bool blk_mq_end_io_partial(struct request *rq, int error,
                           unsigned int nr_bytes);
static inline void blk_mq_end_io(struct request *rq, int error)
{
        bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));
        BUG_ON(!done);
}

void blk_mq_complete_request(struct request *rq);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q);
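
/*
 * Illustrative sketch, not part of this header: a typical completion path.
 * The device reports a finished tag, the driver looks up the request and
 * ends it, then restarts any hardware queues that queue_rq() stopped while
 * the device was busy. A driver that set a ->complete callback would call
 * blk_mq_complete_request() instead of blk_mq_end_io(). The example_*
 * names are hypothetical.
 */
#if 0
static void example_complete(struct example_dev *dev, unsigned int tag)
{
        struct request *rq = blk_mq_rq_from_tag(dev->queue, tag);

        blk_mq_end_io(rq, 0);           /* all bytes done, no error */
        blk_mq_start_stopped_hw_queues(dev->queue);
}
#endif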

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
        return pdu - sizeof(struct request);
}

static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
        return (void *) rq + sizeof(*rq);
}
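
/*
 * Illustrative sketch, not part of this header: the two helpers above are
 * inverses over the same allocation, since the PDU sits directly behind
 * its request in memory. struct example_cmd is hypothetical.
 */
#if 0
static void example_pdu_round_trip(struct request *rq)
{
        struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

        /* stepping back by sizeof(struct request) recovers the request */
        BUG_ON(blk_mq_rq_from_pdu(cmd) != rq);
}
#endif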

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
                                               unsigned int tag)
{
        return hctx->rqs[tag];
}

#define queue_for_each_hw_ctx(q, hctx, i)                               \
        for ((i) = 0; (i) < (q)->nr_hw_queues &&                        \
             ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)                                   \
        for ((i) = 0; (i) < (q)->nr_queues &&                           \
             ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)                                 \
        for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
             ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

#define blk_ctx_sum(q, sum)                                             \
({                                                                      \
        struct blk_mq_ctx *__x;                                         \
        unsigned int __ret = 0, __i;                                    \
                                                                        \
        queue_for_each_ctx((q), __x, __i)                               \
                __ret += sum;                                           \
        __ret;                                                          \
})
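
/*
 * Illustrative sketch, not part of this header: the iterators above walk
 * the queue topology, and blk_ctx_sum() evaluates its "sum" expression
 * once per software queue with __x bound to the current ctx. For example,
 * totalling the hardware-queue run counters needs only public fields:
 */
#if 0
static unsigned long example_total_run(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned long total = 0;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i)
                total += hctx->run;     /* times this hw queue was run */

        return total;
}
#endif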

#endif