/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	unsigned int i;

	for (i = 0; i < hctx->ctx_map.map_size; i++)
		if (hctx->ctx_map.map[i].word)
			return true;

	return false;
}

static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
					      struct blk_mq_ctx *ctx)
{
	return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
}

#define CTX_TO_BIT(hctx, ctx)	\
	((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	struct blk_align_bitmap *bm = get_bm(hctx, ctx);

	if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
		set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	struct blk_align_bitmap *bm = get_bm(hctx, ctx);

	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

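/*
 * Grab a reference on the queue usage counter. If the queue is bypassed
 * (e.g. being frozen), wait for that to clear, and fail with -ENODEV once
 * the queue is dying. Paired with blk_mq_queue_exit().
 */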
static int blk_mq_queue_enter(struct request_queue *q)
{
	int ret;

	__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
	smp_wmb();

	/* we have problems freezing the queue if it's initializing */
	if (!blk_queue_dying(q) &&
	    (!blk_queue_bypass(q) || !blk_queue_init_done(q)))
		return 0;

	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);

	spin_lock_irq(q->queue_lock);
	ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
			!blk_queue_bypass(q) || blk_queue_dying(q),
			*q->queue_lock);
	/* inc usage with lock hold to avoid freeze_queue runs here */
	if (!ret && !blk_queue_dying(q))
		__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
	else if (blk_queue_dying(q))
		ret = -ENODEV;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static void blk_mq_queue_exit(struct request_queue *q)
{
	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
}

void blk_mq_drain_queue(struct request_queue *q)
{
	while (true) {
		s64 count;

		spin_lock_irq(q->queue_lock);
		count = percpu_counter_sum(&q->mq_usage_counter);
		spin_unlock_irq(q->queue_lock);

		if (count == 0)
			break;
		blk_mq_start_hw_queues(q);
		msleep(10);
	}
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
static void blk_mq_freeze_queue(struct request_queue *q)
{
	bool drain;

	spin_lock_irq(q->queue_lock);
	drain = !q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	if (drain)
		blk_mq_drain_queue(q);
}

static void blk_mq_unfreeze_queue(struct request_queue *q)
{
	bool wake = false;

	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth) {
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
		wake = true;
	}
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
	if (wake)
		wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			       struct request *rq, unsigned int rw_flags)
{
	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;

	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = q;
	rq->mq_ctx = ctx;
	rq->cmd_flags |= rw_flags;
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->errors = 0;

	rq->extra_len = 0;
	rq->sense_len = 0;
	rq->resid_len = 0;
	rq->sense = NULL;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}

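/*
 * Grab a free tag and initialize the request backing it. Returns NULL if
 * no tag could be obtained.
 */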
static struct request *
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
{
	struct request *rq;
	unsigned int tag;

	tag = blk_mq_get_tag(data);
	if (tag != BLK_MQ_TAG_FAIL) {
		rq = data->hctx->tags->rqs[tag];

		rq->cmd_flags = 0;
		if (blk_mq_tag_busy(data->hctx)) {
			rq->cmd_flags = REQ_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}

		rq->tag = tag;
		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
		return rq;
	}

	return NULL;
}

struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
		bool reserved)
{
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
	struct blk_mq_alloc_data alloc_data;

	if (blk_mq_queue_enter(q))
		return NULL;

	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
			reserved, ctx, hctx);

	rq = __blk_mq_alloc_request(&alloc_data, rw);
	if (!rq && (gfp & __GFP_WAIT)) {
		__blk_mq_run_hw_queue(hctx);
		blk_mq_put_ctx(ctx);

		ctx = blk_mq_get_ctx(q);
		hctx = q->mq_ops->map_queue(q, ctx->cpu);
		blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
				hctx);
		rq = __blk_mq_alloc_request(&alloc_data, rw);
		ctx = alloc_data.ctx;
	}
	blk_mq_put_ctx(ctx);
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx, struct request *rq)
{
	const int tag = rq->tag;
	struct request_queue *q = rq->q;

	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
	blk_mq_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q = rq->q;

	ctx->rq_completed[rq_is_sync(rq)]++;

	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	__blk_mq_free_request(hctx, ctx, rq);
}

/*
 * Clone all relevant state from a request that has been put on hold in
 * the flush state machine into the preallocated flush request that hangs
 * off the request queue.
 *
 * For a driver the flush request should be invisible, that's why we are
 * impersonating the original request here.
 */
void blk_mq_clone_flush_request(struct request *flush_rq,
		struct request *orig_rq)
{
	struct blk_mq_hw_ctx *hctx =
		orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);

	flush_rq->mq_ctx = orig_rq->mq_ctx;
	flush_rq->tag = orig_rq->tag;
	memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
		hctx->cmd_size);
}

inline void __blk_mq_end_io(struct request *rq, int error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_io);

void blk_mq_end_io(struct request *rq, int error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_io(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_io);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void blk_mq_ipi_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

void __blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (!q->softirq_done_fn)
		blk_mq_end_io(rq, rq->errors);
	else
		blk_mq_ipi_complete_request(rq);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq))
		__blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

static void blk_mq_start_request(struct request *rq, bool last)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(q, rq);

	rq->resid_len = blk_rq_bytes(rq);
	if (unlikely(blk_bidi_rq(rq)))
		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

	blk_add_timer(rq);

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears. We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}

	/*
	 * Flag the last request in the series so that drivers know when IO
	 * should be kicked off, if they don't do it on a per-request basis.
	 *
	 * Note: the flag isn't the only condition drivers should do kick off.
	 * If drive is busy, the last request might not have the bit set.
	 */
	if (last)
		rq->cmd_flags |= REQ_END;
}

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);
	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);

	rq->cmd_flags &= ~REQ_END;

	if (q->dma_drain_size && blk_rq_bytes(rq))
		rq->nr_phys_segments--;
}

void blk_mq_requeue_request(struct request *rq)
{
	__blk_mq_requeue_request(rq);
	blk_clear_rq_complete(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

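/*
 * Work handler that drains the requeue list: requests flagged with
 * REQ_SOFTBARRIER are re-inserted at the head first, the remainder at the
 * tail, then the queues are re-run.
 */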
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
			continue;

		rq->cmd_flags &= ~REQ_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, false, false, false);
	}

	blk_mq_run_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_work(&q->requeue_work);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

static inline bool is_flush_request(struct request *rq, unsigned int tag)
{
	return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
			rq->q->flush_rq->tag == tag);
}

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	struct request *rq = tags->rqs[tag];

	if (!is_flush_request(rq, tag))
		return rq;

	return rq->q->flush_rq;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	struct blk_mq_hw_ctx *hctx;
	unsigned long *next;
	unsigned int *next_set;
};

static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
{
	struct blk_mq_timeout_data *data = __data;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	unsigned int tag;

	/* It may not be in flight yet (this is where
	 * the REQ_ATOMIC_STARTED flag comes in). The requests are
	 * statically allocated, so we know it's always safe to access the
	 * memory associated with a bit offset into ->rqs[].
	 */
	tag = 0;
	do {
		struct request *rq;

		tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
		if (tag >= hctx->tags->nr_tags)
			break;

		rq = blk_mq_tag_to_rq(hctx->tags, tag++);
		if (rq->q != hctx->queue)
			continue;
		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
			continue;

		blk_rq_check_expired(rq, data->next, data->next_set);
	} while (1);
}

static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
		unsigned long *next,
		unsigned int *next_set)
{
	struct blk_mq_timeout_data data = {
		.hctx		= hctx,
		.next		= next,
		.next_set	= next_set,
	};

	/*
	 * Ask the tagging code to iterate busy requests, so we can
	 * check them for timeout.
	 */
	blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
}

static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
{
	struct request_queue *q = rq->q;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		return BLK_EH_NOT_HANDLED;

	if (!q->mq_ops->timeout)
		return BLK_EH_RESET_TIMER;

	return q->mq_ops->timeout(rq);
}

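/* Queue timeout timer: scan every hardware queue for expired requests. */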
static void blk_mq_rq_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *) data;
	struct blk_mq_hw_ctx *hctx;
	unsigned long next = 0;
	int i, next_set = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!hctx->nr_ctx || !hctx->tags)
			continue;

		blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
	}

	if (next_set) {
		next = blk_rq_timeout(round_jiffies_up(next));
		mod_timer(&q->timeout, next);
	} else {
		queue_for_each_hw_ctx(q, hctx, i)
			blk_mq_tag_idle(hctx);
	}
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		int el_ret;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			if (bio_attempt_back_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			if (bio_attempt_front_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		}
	}

	return false;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct blk_mq_ctx *ctx;
	int i;

	for (i = 0; i < hctx->ctx_map.map_size; i++) {
		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
		unsigned int off, bit;

		if (!bm->word)
			continue;

		bit = 0;
		off = i * hctx->ctx_map.bits_per_word;
		do {
			bit = find_next_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			ctx = hctx->ctxs[bit + off];
			clear_bit(bit, &bm->word);
			spin_lock(&ctx->lock);
			list_splice_tail_init(&ctx->rq_list, list);
			spin_unlock(&ctx->lock);

			bit++;
		} while (1);
	}
}

/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct request *rq;
	LIST_HEAD(rq_list);
	int queued;

	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));

	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return;

	hctx->run++;

	/*
	 * Touch any software queue that has pending entries.
	 */
	flush_busy_ctxs(hctx, &rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them
	 * and stuff them at the front for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	queued = 0;
	while (!list_empty(&rq_list)) {
		int ret;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);

		blk_mq_start_request(rq, list_empty(&rq_list));

		ret = q->mq_ops->queue_rq(hctx, rq);
		switch (ret) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
			continue;
		case BLK_MQ_RQ_QUEUE_BUSY:
			list_add(&rq->queuelist, &rq_list);
			__blk_mq_requeue_request(rq);
			break;
		default:
			pr_err("blk-mq: bad return on queue: %d\n", ret);
		case BLK_MQ_RQ_QUEUE_ERROR:
			rq->errors = -EIO;
			blk_mq_end_io(rq, rq->errors);
			break;
		}

		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
			break;
	}

	if (!queued)
		hctx->dispatched[0]++;
	else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
		hctx->dispatched[ilog2(queued) + 1]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(&rq_list)) {
		spin_lock(&hctx->lock);
		list_splice(&rq_list, &hctx->dispatch);
		spin_unlock(&hctx->lock);
	}
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = hctx->next_cpu;

	if (--hctx->next_cpu_batch <= 0) {
		int next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	return cpu;
}

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return;

	if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
		__blk_mq_run_hw_queue(hctx);
	else if (hctx->queue->nr_hw_queues == 1)
		kblockd_schedule_delayed_work(&hctx->run_work, 0);
	else {
		unsigned int cpu;

		cpu = blk_mq_hctx_next_cpu(hctx);
		kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
	}
}

void blk_mq_run_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if ((!blk_mq_hctx_has_pending(hctx) &&
		    list_empty_careful(&hctx->dispatch)) ||
		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		preempt_disable();
		blk_mq_run_hw_queue(hctx, async);
		preempt_enable();
	}
}
EXPORT_SYMBOL(blk_mq_run_queues);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);
	cancel_delayed_work(&hctx->delay_work);
	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	preempt_disable();
	blk_mq_run_hw_queue(hctx, false);
	preempt_enable();
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
		preempt_disable();
		blk_mq_run_hw_queue(hctx, async);
		preempt_enable();
	}
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	__blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delay_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
		__blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	unsigned long tmo = msecs_to_jiffies(msecs);

	if (hctx->queue->nr_hw_queues == 1)
		kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
	else {
		unsigned int cpu;

		cpu = blk_mq_hctx_next_cpu(hctx);
		kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
	}
}
EXPORT_SYMBOL(blk_mq_delay_queue);

static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
				    struct request *rq, bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);

	blk_mq_hctx_mark_pending(hctx, ctx);
}

void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
		bool async)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;

	current_ctx = blk_mq_get_ctx(q);
	if (!cpu_online(ctx->cpu))
		rq->mq_ctx = ctx = current_ctx;

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
		blk_insert_flush(rq);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);

	blk_mq_put_ctx(current_ctx);
}

static void blk_mq_insert_requests(struct request_queue *q,
				     struct blk_mq_ctx *ctx,
				     struct list_head *list,
				     int depth,
				     bool from_schedule)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *current_ctx;

	trace_block_unplug(q, depth, !from_schedule);

	current_ctx = blk_mq_get_ctx(q);

	if (!cpu_online(ctx->cpu))
		ctx = current_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->mq_ctx = ctx;
		__blk_mq_insert_request(hctx, rq, false);
	}
	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, from_schedule);
	blk_mq_put_ctx(current_ctx);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				blk_mq_insert_requests(this_q, this_ctx,
							&ctx_list, depth,
							from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
				       from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	init_request_from_bio(rq, bio);

	if (blk_do_io_stat(rq))
		blk_account_io_start(rq, 1);
}

static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
					 struct blk_mq_ctx *ctx,
					 struct request *rq, struct bio *bio)
{
	struct request_queue *q = hctx->queue;

	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
		blk_mq_bio_to_request(rq, bio);
		spin_lock(&ctx->lock);
insert_rq:
		__blk_mq_insert_request(hctx, rq, false);
		spin_unlock(&ctx->lock);
		return false;
	} else {
		spin_lock(&ctx->lock);
		if (!blk_mq_attempt_merge(q, ctx, bio)) {
			blk_mq_bio_to_request(rq, bio);
			goto insert_rq;
		}

		spin_unlock(&ctx->lock);
		__blk_mq_free_request(hctx, ctx, rq);
		return true;
	}
}

struct blk_map_ctx {
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
};

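/*
 * Map a bio to a software/hardware queue pair and allocate a request for
 * it, retrying with __GFP_WAIT if the initial atomic allocation fails.
 */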
static struct request *blk_mq_map_request(struct request_queue *q,
					  struct bio *bio,
					  struct blk_map_ctx *data)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;
	int rw = bio_data_dir(bio);
	struct blk_mq_alloc_data alloc_data;

	if (unlikely(blk_mq_queue_enter(q))) {
		bio_endio(bio, -EIO);
		return NULL;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	if (rw_is_sync(bio->bi_rw))
		rw |= REQ_SYNC;

	trace_block_getrq(q, bio, rw);
	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
			hctx);
	rq = __blk_mq_alloc_request(&alloc_data, rw);
	if (unlikely(!rq)) {
		__blk_mq_run_hw_queue(hctx);
		blk_mq_put_ctx(ctx);
		trace_block_sleeprq(q, bio, rw);

		ctx = blk_mq_get_ctx(q);
		hctx = q->mq_ops->map_queue(q, ctx->cpu);
		blk_mq_set_alloc_data(&alloc_data, q,
				__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
		rq = __blk_mq_alloc_request(&alloc_data, rw);
		ctx = alloc_data.ctx;
		hctx = alloc_data.hctx;
	}

	hctx->queued++;
	data->hctx = hctx;
	data->ctx = ctx;
	return rq;
}

/*
 * Multiple hardware queue variant. This will not use per-process plugs,
 * but will attempt to bypass the hctx queueing if we can go straight to
 * hardware for SYNC IO.
 */
static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = rw_is_sync(bio->bi_rw);
	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
	struct blk_map_ctx data;
	struct request *rq;

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_endio(bio, -EIO);
		return;
	}

	rq = blk_mq_map_request(q, bio, &data);
	if (unlikely(!rq))
		return;

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

	if (is_sync) {
		int ret;

		blk_mq_bio_to_request(rq, bio);
		blk_mq_start_request(rq, true);

		/*
		 * For OK queue, we are done. For error, kill it. Any other
		 * error (busy), just add it to our list as we previously
		 * would have done
		 */
		ret = q->mq_ops->queue_rq(data.hctx, rq);
		if (ret == BLK_MQ_RQ_QUEUE_OK)
			goto done;
		else {
			__blk_mq_requeue_request(rq);

			if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
				rq->errors = -EIO;
				blk_mq_end_io(rq, rq->errors);
				goto done;
			}
		}
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
		 * For a SYNC request, send it to the hardware immediately. For
		 * an ASYNC request, just ensure that we run it later on. The
		 * latter allows for merging opportunities and more efficient
		 * dispatching.
		 */
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}
done:
	blk_mq_put_ctx(data.ctx);
}

/*
 * Single hardware queue variant. This will attempt to use any per-process
 * plug for merging and IO deferral.
 */
static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = rw_is_sync(bio->bi_rw);
	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
	unsigned int use_plug, request_count = 0;
	struct blk_map_ctx data;
	struct request *rq;

	/*
	 * If we have multiple hardware queues, just go directly to
	 * one of those for sync IO.
	 */
	use_plug = !is_flush_fua && !is_sync;

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_endio(bio, -EIO);
		return;
	}

	if (use_plug && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count))
		return;

	rq = blk_mq_map_request(q, bio, &data);
	if (unlikely(!rq))
		return;

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

	/*
	 * A task plug currently exists. Since this is completely lockless,
	 * utilize that to temporarily store requests until the task is
	 * either done or scheduled away.
	 */
	if (use_plug) {
		struct blk_plug *plug = current->plug;

		if (plug) {
			blk_mq_bio_to_request(rq, bio);
			if (list_empty(&plug->mq_list))
				trace_block_plug(q);
			else if (request_count >= BLK_MAX_REQUEST_COUNT) {
				blk_flush_plug_list(plug, false);
				trace_block_plug(q);
			}
			list_add_tail(&rq->queuelist, &plug->mq_list);
			blk_mq_put_ctx(data.ctx);
			return;
		}
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
		 * For a SYNC request, send it to the hardware immediately. For
		 * an ASYNC request, just ensure that we run it later on. The
		 * latter allows for merging opportunities and more efficient
		 * dispatching.
		 */
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}

	blk_mq_put_ctx(data.ctx);
}

/*
 * Default mapping to a software queue, since we use one per CPU.
 */
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
EXPORT_SYMBOL(blk_mq_map_queue);

static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
		struct blk_mq_tags *tags, unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			if (!tags->rqs[i])
				continue;
			set->ops->exit_request(set->driver_data, tags->rqs[i],
						hctx_idx, i);
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		__free_pages(page, page->private);
	}

	kfree(tags->rqs);

	blk_mq_free_tags(tags);
}

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

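/*
 * Allocate the tags for one hardware queue along with the pages backing
 * its statically allocated requests, calling the driver's init_request
 * hook for each of them.
 */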
static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
		unsigned int hctx_idx)
{
	struct blk_mq_tags *tags;
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;

	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
				set->numa_node);
	if (!tags)
		return NULL;

	INIT_LIST_HEAD(&tags->page_list);

	tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
				 GFP_KERNEL, set->numa_node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * set->queue_depth;

	for (i = 0; i < set->queue_depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (left < order_to_size(this_order - 1) && this_order)
			this_order--;

		do {
			page = alloc_pages_node(set->numa_node, GFP_KERNEL,
						this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, set->queue_depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			tags->rqs[i] = p;
			if (set->ops->init_request) {
				if (set->ops->init_request(set->driver_data,
						tags->rqs[i], hctx_idx, i,
						set->numa_node))
					goto fail;
			}

			p += rq_size;
			i++;
		}
	}

	return tags;

fail:
	pr_warn("%s: failed to allocate requests\n", __func__);
	blk_mq_free_rq_map(set, tags, hctx_idx);
	return NULL;
}

static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
{
	kfree(bitmap->map);
}

static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
{
	unsigned int bpw = 8, total, num_maps, i;

	bitmap->bits_per_word = bpw;

	num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
	bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
					GFP_KERNEL, node);
	if (!bitmap->map)
		return -ENOMEM;

	bitmap->map_size = num_maps;

	total = nr_cpu_ids;
	for (i = 0; i < num_maps; i++) {
		bitmap->map[i].depth = min(total, bitmap->bits_per_word);
		total -= bitmap->map[i].depth;
	}

	return 0;
}

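/*
 * CPU going offline: move any requests pending on its software queue over
 * to an online ctx and kick the hardware queue.
 */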
static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	/*
	 * Move ctx entries to new CPU, if this one is going away.
	 */
	ctx = __blk_mq_get_ctx(q, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return NOTIFY_OK;

	ctx = blk_mq_get_ctx(q);
	spin_lock(&ctx->lock);

	while (!list_empty(&tmp)) {
		struct request *rq;

		rq = list_first_entry(&tmp, struct request, queuelist);
		rq->mq_ctx = ctx;
		list_move_tail(&rq->queuelist, &ctx->rq_list);
	}

	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	blk_mq_hctx_mark_pending(hctx, ctx);

	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	blk_mq_put_ctx(ctx);
	return NOTIFY_OK;
}

static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_tag_set *set = q->tag_set;

	if (set->tags[hctx->queue_num])
		return NOTIFY_OK;

	set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
	if (!set->tags[hctx->queue_num])
		return NOTIFY_STOP;

	hctx->tags = set->tags[hctx->queue_num];
	return NOTIFY_OK;
}

static int blk_mq_hctx_notify(void *data, unsigned long action,
			      unsigned int cpu)
{
	struct blk_mq_hw_ctx *hctx = data;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		return blk_mq_hctx_cpu_offline(hctx, cpu);
	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		return blk_mq_hctx_cpu_online(hctx, cpu);

	return NOTIFY_OK;
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;

		blk_mq_tag_idle(hctx);

		if (set->ops->exit_hctx)
			set->ops->exit_hctx(hctx, i);

		blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
		kfree(hctx->ctxs);
		blk_mq_free_bitmap(&hctx->ctx_map);
	}
}

static void blk_mq_free_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		free_cpumask_var(hctx->cpumask);
		kfree(hctx);
	}
}

static int blk_mq_init_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	/*
	 * Initialize hardware queues
	 */
	queue_for_each_hw_ctx(q, hctx, i) {
		int node;

		node = hctx->numa_node;
		if (node == NUMA_NO_NODE)
			node = hctx->numa_node = set->numa_node;

		INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
		INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
		spin_lock_init(&hctx->lock);
		INIT_LIST_HEAD(&hctx->dispatch);
		hctx->queue = q;
		hctx->queue_num = i;
		hctx->flags = set->flags;
		hctx->cmd_size = set->cmd_size;

		blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
					blk_mq_hctx_notify, hctx);
		blk_mq_register_cpu_notifier(&hctx->cpu_notifier);

		hctx->tags = set->tags[i];

		/*
1599		 * Allocate space for all possible cpus to avoid allocating at
1600		 * runtime
1601 */
1602 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1603 GFP_KERNEL, node);
1604 if (!hctx->ctxs)
1605 break;
1606
Jens Axboe1429d7c2014-05-19 09:23:55 -06001607 if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
Jens Axboe320ae512013-10-24 09:20:05 +01001608 break;
1609
Jens Axboe320ae512013-10-24 09:20:05 +01001610 hctx->nr_ctx = 0;
1611
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001612 if (set->ops->init_hctx &&
1613 set->ops->init_hctx(hctx, set->driver_data, i))
Jens Axboe320ae512013-10-24 09:20:05 +01001614 break;
1615 }
1616
1617 if (i == q->nr_hw_queues)
1618 return 0;
1619
1620 /*
1621 * Init failed
1622 */
Ming Lei624dbe42014-05-27 23:35:13 +08001623 blk_mq_exit_hw_queues(q, set, i);
Jens Axboe320ae512013-10-24 09:20:05 +01001624
1625 return 1;
1626}
1627
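/*
 * Initialise the per-CPU software queues and, for CPUs that are
 * already online, record them in the cpumask of the hardware queue
 * they map to.
 */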
1628static void blk_mq_init_cpu_queues(struct request_queue *q,
1629 unsigned int nr_hw_queues)
1630{
1631 unsigned int i;
1632
1633 for_each_possible_cpu(i) {
1634 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1635 struct blk_mq_hw_ctx *hctx;
1636
1637 memset(__ctx, 0, sizeof(*__ctx));
1638 __ctx->cpu = i;
1639 spin_lock_init(&__ctx->lock);
1640 INIT_LIST_HEAD(&__ctx->rq_list);
1641 __ctx->queue = q;
1642
1643		/* If the cpu isn't online, the cpu is mapped to the first hctx */
Jens Axboe320ae512013-10-24 09:20:05 +01001644 if (!cpu_online(i))
1645 continue;
1646
Jens Axboee4043dc2014-04-09 10:18:23 -06001647 hctx = q->mq_ops->map_queue(q, i);
1648 cpumask_set_cpu(i, hctx->cpumask);
1649 hctx->nr_ctx++;
1650
Jens Axboe320ae512013-10-24 09:20:05 +01001651 /*
1652 * Set local node, IFF we have more than one hw queue. If
1653 * not, we remain on the home node of the device
1654 */
1655 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1656 hctx->numa_node = cpu_to_node(i);
1657 }
1658}
1659
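/*
 * Rebuild the software to hardware queue mapping from scratch, using
 * only the CPUs that are currently online. Hardware queues that end up
 * with no software queues mapped to them have their request/tag maps
 * freed; the maps are re-allocated if a CPU mapping to them comes back
 * online (see blk_mq_hctx_cpu_online()).
 */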
1660static void blk_mq_map_swqueue(struct request_queue *q)
1661{
1662 unsigned int i;
1663 struct blk_mq_hw_ctx *hctx;
1664 struct blk_mq_ctx *ctx;
1665
1666 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboee4043dc2014-04-09 10:18:23 -06001667 cpumask_clear(hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01001668 hctx->nr_ctx = 0;
1669 }
1670
1671 /*
1672 * Map software to hardware queues
1673 */
1674 queue_for_each_ctx(q, ctx, i) {
1675		/* If the cpu isn't online, the cpu is mapped to the first hctx */
Jens Axboee4043dc2014-04-09 10:18:23 -06001676 if (!cpu_online(i))
1677 continue;
1678
Jens Axboe320ae512013-10-24 09:20:05 +01001679 hctx = q->mq_ops->map_queue(q, i);
Jens Axboee4043dc2014-04-09 10:18:23 -06001680 cpumask_set_cpu(i, hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01001681 ctx->index_hw = hctx->nr_ctx;
1682 hctx->ctxs[hctx->nr_ctx++] = ctx;
1683 }
Jens Axboe506e9312014-05-07 10:26:44 -06001684
1685 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboe484b4062014-05-21 14:01:15 -06001686 /*
1687		 * If no software queues are mapped to this hardware queue,
1688 * disable it and free the request entries
1689 */
1690 if (!hctx->nr_ctx) {
1691 struct blk_mq_tag_set *set = q->tag_set;
1692
1693 if (set->tags[i]) {
1694 blk_mq_free_rq_map(set, set->tags[i], i);
1695 set->tags[i] = NULL;
1696 hctx->tags = NULL;
1697 }
1698 continue;
1699 }
1700
1701 /*
1702 * Initialize batch roundrobin counts
1703 */
Jens Axboe506e9312014-05-07 10:26:44 -06001704 hctx->next_cpu = cpumask_first(hctx->cpumask);
1705 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1706 }
Jens Axboe320ae512013-10-24 09:20:05 +01001707}
1708
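/*
 * A tag set that is shared by more than one request queue needs
 * BLK_MQ_F_TAG_SHARED set on every hardware queue of every queue that
 * uses it; once the set is back to a single user the flag is cleared
 * again. Each queue is frozen while its flags are updated.
 */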
Jens Axboe0d2602c2014-05-13 15:10:52 -06001709static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1710{
1711 struct blk_mq_hw_ctx *hctx;
1712 struct request_queue *q;
1713 bool shared;
1714 int i;
1715
1716 if (set->tag_list.next == set->tag_list.prev)
1717 shared = false;
1718 else
1719 shared = true;
1720
1721 list_for_each_entry(q, &set->tag_list, tag_set_list) {
1722 blk_mq_freeze_queue(q);
1723
1724 queue_for_each_hw_ctx(q, hctx, i) {
1725 if (shared)
1726 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1727 else
1728 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1729 }
1730 blk_mq_unfreeze_queue(q);
1731 }
1732}
1733
1734static void blk_mq_del_queue_tag_set(struct request_queue *q)
1735{
1736 struct blk_mq_tag_set *set = q->tag_set;
1737
1738 blk_mq_freeze_queue(q);
1739
1740 mutex_lock(&set->tag_list_lock);
1741 list_del_init(&q->tag_set_list);
1742 blk_mq_update_tag_set_depth(set);
1743 mutex_unlock(&set->tag_list_lock);
1744
1745 blk_mq_unfreeze_queue(q);
1746}
1747
1748static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1749 struct request_queue *q)
1750{
1751 q->tag_set = set;
1752
1753 mutex_lock(&set->tag_list_lock);
1754 list_add_tail(&q->tag_set_list, &set->tag_list);
1755 blk_mq_update_tag_set_depth(set);
1756 mutex_unlock(&set->tag_list_lock);
1757}
1758
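/*
 * Allocate and initialise a request queue on top of @set: per-CPU
 * software contexts, per-node hardware contexts, the CPU-to-queue map
 * and the flush request. Returns the new queue, or an ERR_PTR() on
 * failure, so callers should check the result with IS_ERR().
 */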
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001759struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
Jens Axboe320ae512013-10-24 09:20:05 +01001760{
1761 struct blk_mq_hw_ctx **hctxs;
Ming Leie6cdb092014-06-03 11:24:06 +08001762 struct blk_mq_ctx __percpu *ctx;
Jens Axboe320ae512013-10-24 09:20:05 +01001763 struct request_queue *q;
Jens Axboef14bbe72014-05-27 12:06:53 -06001764 unsigned int *map;
Jens Axboe320ae512013-10-24 09:20:05 +01001765 int i;
1766
Jens Axboe320ae512013-10-24 09:20:05 +01001767 ctx = alloc_percpu(struct blk_mq_ctx);
1768 if (!ctx)
1769 return ERR_PTR(-ENOMEM);
1770
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001771 hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1772 set->numa_node);
Jens Axboe320ae512013-10-24 09:20:05 +01001773
1774 if (!hctxs)
1775 goto err_percpu;
1776
Jens Axboef14bbe72014-05-27 12:06:53 -06001777 map = blk_mq_make_queue_map(set);
1778 if (!map)
1779 goto err_map;
1780
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001781 for (i = 0; i < set->nr_hw_queues; i++) {
Jens Axboef14bbe72014-05-27 12:06:53 -06001782 int node = blk_mq_hw_queue_to_node(map, i);
1783
Christoph Hellwigcdef54d2014-05-28 18:11:06 +02001784 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1785 GFP_KERNEL, node);
Jens Axboe320ae512013-10-24 09:20:05 +01001786 if (!hctxs[i])
1787 goto err_hctxs;
1788
Jens Axboee4043dc2014-04-09 10:18:23 -06001789 if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1790 goto err_hctxs;
1791
Jens Axboe0d2602c2014-05-13 15:10:52 -06001792 atomic_set(&hctxs[i]->nr_active, 0);
Jens Axboef14bbe72014-05-27 12:06:53 -06001793 hctxs[i]->numa_node = node;
Jens Axboe320ae512013-10-24 09:20:05 +01001794 hctxs[i]->queue_num = i;
1795 }
1796
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001797 q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
Jens Axboe320ae512013-10-24 09:20:05 +01001798 if (!q)
1799 goto err_hctxs;
1800
Ming Lei3d2936f2014-05-27 23:35:14 +08001801 if (percpu_counter_init(&q->mq_usage_counter, 0))
1802 goto err_map;
1803
Jens Axboe320ae512013-10-24 09:20:05 +01001804 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1805 blk_queue_rq_timeout(q, 30000);
1806
1807 q->nr_queues = nr_cpu_ids;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001808 q->nr_hw_queues = set->nr_hw_queues;
Jens Axboef14bbe72014-05-27 12:06:53 -06001809 q->mq_map = map;
Jens Axboe320ae512013-10-24 09:20:05 +01001810
1811 q->queue_ctx = ctx;
1812 q->queue_hw_ctx = hctxs;
1813
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001814 q->mq_ops = set->ops;
Jens Axboe94eddfb2013-11-19 09:25:07 -07001815 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
Jens Axboe320ae512013-10-24 09:20:05 +01001816
Jens Axboe05f1dd52014-05-29 09:53:32 -06001817 if (!(set->flags & BLK_MQ_F_SG_MERGE))
1818 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1819
Christoph Hellwig1be036e2014-02-07 10:22:39 -08001820 q->sg_reserved_size = INT_MAX;
1821
Christoph Hellwig6fca6a62014-05-28 08:08:02 -06001822 INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1823 INIT_LIST_HEAD(&q->requeue_list);
1824 spin_lock_init(&q->requeue_lock);
1825
Jens Axboe07068d52014-05-22 10:40:51 -06001826 if (q->nr_hw_queues > 1)
1827 blk_queue_make_request(q, blk_mq_make_request);
1828 else
1829 blk_queue_make_request(q, blk_sq_make_request);
1830
Jens Axboe87ee7b12014-04-24 08:51:47 -06001831 blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001832 if (set->timeout)
1833 blk_queue_rq_timeout(q, set->timeout);
Jens Axboe320ae512013-10-24 09:20:05 +01001834
Jens Axboeeba71762014-05-20 15:17:27 -06001835 /*
1836 * Do this after blk_queue_make_request() overrides it...
1837 */
1838 q->nr_requests = set->queue_depth;
1839
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001840 if (set->ops->complete)
1841 blk_queue_softirq_done(q, set->ops->complete);
Christoph Hellwig30a91cb2014-02-10 03:24:38 -08001842
Jens Axboe320ae512013-10-24 09:20:05 +01001843 blk_mq_init_flush(q);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001844 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
Jens Axboe320ae512013-10-24 09:20:05 +01001845
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001846 q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1847 set->cmd_size, cache_line_size()),
1848 GFP_KERNEL);
Christoph Hellwig18741982014-02-10 09:29:00 -07001849 if (!q->flush_rq)
Jens Axboe320ae512013-10-24 09:20:05 +01001850 goto err_hw;
1851
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001852 if (blk_mq_init_hw_queues(q, set))
Christoph Hellwig18741982014-02-10 09:29:00 -07001853 goto err_flush_rq;
1854
Jens Axboe320ae512013-10-24 09:20:05 +01001855 mutex_lock(&all_q_mutex);
1856 list_add_tail(&q->all_q_node, &all_q_list);
1857 mutex_unlock(&all_q_mutex);
1858
Jens Axboe0d2602c2014-05-13 15:10:52 -06001859 blk_mq_add_queue_tag_set(set, q);
1860
Jens Axboe484b4062014-05-21 14:01:15 -06001861 blk_mq_map_swqueue(q);
1862
Jens Axboe320ae512013-10-24 09:20:05 +01001863 return q;
Christoph Hellwig18741982014-02-10 09:29:00 -07001864
1865err_flush_rq:
1866 kfree(q->flush_rq);
Jens Axboe320ae512013-10-24 09:20:05 +01001867err_hw:
Jens Axboe320ae512013-10-24 09:20:05 +01001868 blk_cleanup_queue(q);
1869err_hctxs:
Jens Axboef14bbe72014-05-27 12:06:53 -06001870 kfree(map);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001871 for (i = 0; i < set->nr_hw_queues; i++) {
Jens Axboe320ae512013-10-24 09:20:05 +01001872 if (!hctxs[i])
1873 break;
Jens Axboee4043dc2014-04-09 10:18:23 -06001874 free_cpumask_var(hctxs[i]->cpumask);
Christoph Hellwigcdef54d2014-05-28 18:11:06 +02001875 kfree(hctxs[i]);
Jens Axboe320ae512013-10-24 09:20:05 +01001876 }
Jens Axboef14bbe72014-05-27 12:06:53 -06001877err_map:
Jens Axboe320ae512013-10-24 09:20:05 +01001878 kfree(hctxs);
1879err_percpu:
1880 free_percpu(ctx);
1881 return ERR_PTR(-ENOMEM);
1882}
1883EXPORT_SYMBOL(blk_mq_init_queue);
1884
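/*
 * Undo blk_mq_init_queue(): drop the queue from its tag set, tear down
 * and free the hardware queues, and release the per-CPU contexts and
 * the CPU-to-queue map.
 */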
1885void blk_mq_free_queue(struct request_queue *q)
1886{
Ming Lei624dbe42014-05-27 23:35:13 +08001887 struct blk_mq_tag_set *set = q->tag_set;
Jens Axboe320ae512013-10-24 09:20:05 +01001888
Jens Axboe0d2602c2014-05-13 15:10:52 -06001889 blk_mq_del_queue_tag_set(q);
1890
Ming Lei624dbe42014-05-27 23:35:13 +08001891 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1892 blk_mq_free_hw_queues(q, set);
Jens Axboe320ae512013-10-24 09:20:05 +01001893
Ming Lei3d2936f2014-05-27 23:35:14 +08001894 percpu_counter_destroy(&q->mq_usage_counter);
1895
Jens Axboe320ae512013-10-24 09:20:05 +01001896 free_percpu(q->queue_ctx);
1897 kfree(q->queue_hw_ctx);
1898 kfree(q->mq_map);
1899
1900 q->queue_ctx = NULL;
1901 q->queue_hw_ctx = NULL;
1902 q->mq_map = NULL;
1903
1904 mutex_lock(&all_q_mutex);
1905 list_del_init(&q->all_q_node);
1906 mutex_unlock(&all_q_mutex);
1907}
Jens Axboe320ae512013-10-24 09:20:05 +01001908
1909/* Basically redo blk_mq_init_queue with queue frozen */
Paul Gortmakerf618ef72013-11-14 08:26:02 -07001910static void blk_mq_queue_reinit(struct request_queue *q)
Jens Axboe320ae512013-10-24 09:20:05 +01001911{
1912 blk_mq_freeze_queue(q);
1913
Jens Axboe67aec142014-05-30 08:25:36 -06001914 blk_mq_sysfs_unregister(q);
1915
Jens Axboe320ae512013-10-24 09:20:05 +01001916 blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1917
1918 /*
1919 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1920	 * we should change hctx numa_node according to the new topology (this
1921	 * involves freeing and re-allocating memory; is it worth doing?)
1922 */
1923
1924 blk_mq_map_swqueue(q);
1925
Jens Axboe67aec142014-05-30 08:25:36 -06001926 blk_mq_sysfs_register(q);
1927
Jens Axboe320ae512013-10-24 09:20:05 +01001928 blk_mq_unfreeze_queue(q);
1929}
1930
Paul Gortmakerf618ef72013-11-14 08:26:02 -07001931static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1932 unsigned long action, void *hcpu)
Jens Axboe320ae512013-10-24 09:20:05 +01001933{
1934 struct request_queue *q;
1935
1936 /*
Jens Axboe9fccfed2014-05-08 14:50:19 -06001937	 * Before new mappings are established, a hot-added cpu might already
1938 * start handling requests. This doesn't break anything as we map
1939 * offline CPUs to first hardware queue. We will re-init the queue
1940 * below to get optimal settings.
Jens Axboe320ae512013-10-24 09:20:05 +01001941 */
1942 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1943 action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1944 return NOTIFY_OK;
1945
1946 mutex_lock(&all_q_mutex);
1947 list_for_each_entry(q, &all_q_list, all_q_node)
1948 blk_mq_queue_reinit(q);
1949 mutex_unlock(&all_q_mutex);
1950 return NOTIFY_OK;
1951}
1952
Jens Axboea4391c62014-06-05 15:21:56 -06001953/*
1954 * Alloc a tag set to be associated with one or more request queues.
1955 * May fail with EINVAL for various error conditions. May adjust the
1956 * requested depth down, if it is too large. In that case, the set
1957 * value will be stored in set->queue_depth.
1958 */
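/*
 * Typical driver usage (illustrative sketch only: my_mq_ops and
 * my_request_pdu are placeholder names, and the numbers are arbitrary):
 *
 *	memset(&set, 0, sizeof(set));
 *	set.ops		 = &my_mq_ops;
 *	set.nr_hw_queues = 1;
 *	set.queue_depth	 = 64;
 *	set.numa_node	 = NUMA_NO_NODE;
 *	set.cmd_size	 = sizeof(struct my_request_pdu);
 *
 *	if (blk_mq_alloc_tag_set(&set))
 *		goto out;
 *	q = blk_mq_init_queue(&set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&set);
 *		goto out;
 *	}
 */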
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001959int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1960{
1961 int i;
1962
1963 if (!set->nr_hw_queues)
1964 return -EINVAL;
Jens Axboea4391c62014-06-05 15:21:56 -06001965 if (!set->queue_depth)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001966 return -EINVAL;
1967 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
1968 return -EINVAL;
1969
Christoph Hellwigcdef54d2014-05-28 18:11:06 +02001970 if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001971 return -EINVAL;
1972
Jens Axboea4391c62014-06-05 15:21:56 -06001973 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
1974 pr_info("blk-mq: reduced tag depth to %u\n",
1975 BLK_MQ_MAX_DEPTH);
1976 set->queue_depth = BLK_MQ_MAX_DEPTH;
1977 }
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001978
Ming Lei48479002014-04-19 18:00:17 +08001979 set->tags = kmalloc_node(set->nr_hw_queues *
1980 sizeof(struct blk_mq_tags *),
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001981 GFP_KERNEL, set->numa_node);
1982 if (!set->tags)
1983 goto out;
1984
1985 for (i = 0; i < set->nr_hw_queues; i++) {
1986 set->tags[i] = blk_mq_init_rq_map(set, i);
1987 if (!set->tags[i])
1988 goto out_unwind;
1989 }
1990
Jens Axboe0d2602c2014-05-13 15:10:52 -06001991 mutex_init(&set->tag_list_lock);
1992 INIT_LIST_HEAD(&set->tag_list);
1993
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001994 return 0;
1995
1996out_unwind:
1997 while (--i >= 0)
1998 blk_mq_free_rq_map(set, set->tags[i], i);
1999out:
2000 return -ENOMEM;
2001}
2002EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2003
2004void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2005{
2006 int i;
2007
Jens Axboe484b4062014-05-21 14:01:15 -06002008 for (i = 0; i < set->nr_hw_queues; i++) {
2009 if (set->tags[i])
2010 blk_mq_free_rq_map(set, set->tags[i], i);
2011 }
2012
Ming Lei981bd182014-04-24 00:07:34 +08002013 kfree(set->tags);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002014}
2015EXPORT_SYMBOL(blk_mq_free_tag_set);
2016
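/*
 * Change the effective queue depth of an existing queue, e.g. when
 * nr_requests is updated through sysfs. The new value must not exceed
 * the depth the tag set was allocated with.
 */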
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002017int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2018{
2019 struct blk_mq_tag_set *set = q->tag_set;
2020 struct blk_mq_hw_ctx *hctx;
2021 int i, ret;
2022
2023 if (!set || nr > set->queue_depth)
2024 return -EINVAL;
2025
2026 ret = 0;
2027 queue_for_each_hw_ctx(q, hctx, i) {
2028 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2029 if (ret)
2030 break;
2031 }
2032
2033 if (!ret)
2034 q->nr_requests = nr;
2035
2036 return ret;
2037}
2038
Jens Axboe676141e2014-03-20 13:29:18 -06002039void blk_mq_disable_hotplug(void)
2040{
2041 mutex_lock(&all_q_mutex);
2042}
2043
2044void blk_mq_enable_hotplug(void)
2045{
2046 mutex_unlock(&all_q_mutex);
2047}
2048
Jens Axboe320ae512013-10-24 09:20:05 +01002049static int __init blk_mq_init(void)
2050{
Jens Axboe320ae512013-10-24 09:20:05 +01002051 blk_mq_cpu_init();
2052
2053 /* Must be called after percpu_counter_hotcpu_callback() */
2054 hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
2055
2056 return 0;
2057}
2058subsys_initcall(blk_mq_init);