#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;

struct blk_mq_cpu_notifier {
	struct list_head	list;
	void			*data;
	void (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	delayed_work;
	cpumask_var_t		cpumask;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	unsigned int		queue_num;

	void			*driver_data;

	unsigned int		nr_ctx;
	struct blk_mq_ctx	**ctxs;
	unsigned int		nr_ctx_map;
	unsigned long		*ctx_map;

	struct blk_mq_tags	*tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		cmd_size;	/* per-request extra data */

	struct blk_mq_cpu_notifier	cpu_notifier;
	struct kobject		kobj;
};

struct blk_mq_tag_set {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;
};
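
/*
 * A tag set is filled in by the driver and then handed to
 * blk_mq_alloc_tag_set()/blk_mq_init_queue() below. Non-normative sketch;
 * the "foo" names are hypothetical and not part of this header:
 *
 *	static struct blk_mq_tag_set foo_tag_set = {
 *		.ops		= &foo_mq_ops,
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= 64,
 *		.cmd_size	= sizeof(struct foo_cmd),
 *		.numa_node	= NUMA_NO_NODE,
 *		.flags		= BLK_MQ_F_SHOULD_MERGE,
 *	};
 */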

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
		unsigned int);
typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	rq_timed_out_fn		*timeout;

	/*
	 * Called from blk_mq_complete_request() to finish a request
	 */
	softirq_done_fn		*complete;

	/*
	 * Override for hctx allocations (should probably go)
	 */
	alloc_hctx_fn		*alloc_hctx;
	free_hctx_fn		*free_hctx;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
};
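
/*
 * Minimal ops sketch for a hypothetical "foo" driver: queue_rq submits a
 * request to hardware (or asks for a requeue), and map_queue can simply use
 * the generic blk_mq_map_queue() declared below. Non-normative example:
 *
 *	static int foo_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 *	{
 *		if (!foo_hw_submit(hctx->driver_data, rq))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 *
 *	static struct blk_mq_ops foo_mq_ops = {
 *		.queue_rq	= foo_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 */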

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_SHOULD_SORT	= 1 << 1,
	BLK_MQ_F_SHOULD_IPI	= 1 << 2,

	BLK_MQ_S_STOPPED	= 0,

	BLK_MQ_MAX_DEPTH	= 2048,
};

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
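
/*
 * Typical init/teardown order, as a sketch (error handling abbreviated;
 * "foo" names are hypothetical):
 *
 *	err = blk_mq_alloc_tag_set(&foo_tag_set);
 *	if (err)
 *		return err;
 *	q = blk_mq_init_queue(&foo_tag_set);
 *	... check q for failure, attach it to the gendisk, run I/O ...
 *	blk_cleanup_queue(q);
 *	blk_mq_free_tag_set(&foo_tag_set);
 */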

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
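
/*
 * Sketch: a driver-initiated command, outside the normal submission path,
 * can allocate a request directly and free it when done:
 *
 *	struct request *rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL);
 *	... fill in and issue the request, wait for completion ...
 *	blk_mq_free_request(rq);
 */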

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);

void blk_mq_end_io(struct request *rq, int error);
void __blk_mq_end_io(struct request *rq, int error);

void blk_mq_complete_request(struct request *rq);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q);
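
/*
 * A common flow-control pattern (sketch, hypothetical "foo" driver): when
 * the hardware runs out of resources, stop the queue before asking for a
 * requeue:
 *
 *	if (foo_hw_full(hctx->driver_data)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_MQ_RQ_QUEUE_BUSY;
 *	}
 *
 * and restart stopped queues from the completion path once space frees up:
 *
 *	blk_mq_start_stopped_hw_queues(q);
 */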

/*
 * Driver command data (the pdu) is laid out immediately after the request,
 * so subtracting the request size gets back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return (void *) rq + sizeof(*rq);
}
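
/*
 * Example (sketch): with tag_set.cmd_size == sizeof(struct foo_cmd), every
 * request carries a foo_cmd right behind it, and the two helpers above are
 * exact inverses ("foo" is hypothetical):
 *
 *	struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same = blk_mq_rq_from_pdu(cmd);	(same == rq)
 */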

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
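
/*
 * Usage sketch: the iterators expand to plain for-loops; for example,
 * walking every hardware queue of a request_queue:
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	unsigned int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		pr_debug("hctx %u serves %u software ctxs\n", i, hctx->nr_ctx);
 */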

#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})
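
/*
 * Note that "sum" is re-evaluated once per software context with __x bound
 * to that context, so the expression must reference __x. Sketch, with a
 * hypothetical per-ctx counter:
 *
 *	nr = blk_ctx_sum(q, __x->foo_count);
 */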

#endif /* BLK_MQ_H */