#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

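/*
 * Every blk-mq request queue is tracked on this list (under all_q_mutex),
 * presumably so queue-wide operations such as CPU hotplug handling can
 * walk all registered queues.
 */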
static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);

static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
					   unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queueing queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	unsigned int i;

	for (i = 0; i < hctx->ctx_map.map_size; i++)
		if (hctx->ctx_map.map[i].word)
			return true;

	return false;
}

static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
					      struct blk_mq_ctx *ctx)
{
	return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
}

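/*
 * A ctx's pending bit lives at word index_hw / bits_per_word (see get_bm()
 * above), at bit index_hw % bits_per_word within that word. bits_per_word
 * is a power of two, so the mask below computes that remainder.
 */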
#define CTX_TO_BIT(hctx, ctx)	\
	((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	struct blk_align_bitmap *bm = get_bm(hctx, ctx);

	if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
		set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	struct blk_align_bitmap *bm = get_bm(hctx, ctx);

	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

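/*
 * Requests are preallocated per hardware queue and indexed by tag, so a
 * successfully allocated tag directly selects its request in ->rqs[].
 */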
static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
					      struct blk_mq_ctx *ctx,
					      gfp_t gfp, bool reserved)
{
	struct request *rq;
	unsigned int tag;

	tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved);
	if (tag != BLK_MQ_TAG_FAIL) {
		rq = hctx->tags->rqs[tag];

		rq->cmd_flags = 0;
		if (blk_mq_tag_busy(hctx)) {
			rq->cmd_flags = REQ_MQ_INFLIGHT;
			atomic_inc(&hctx->nr_active);
		}

		rq->tag = tag;
		return rq;
	}

	return NULL;
}

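/*
 * A queue "enter" reference pins the queue against freezing: the fast path
 * just bumps the usage counter, while the slow path waits until the queue
 * leaves bypass mode (or dies) before taking the reference.
 */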
static int blk_mq_queue_enter(struct request_queue *q)
{
	int ret;

	__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
	smp_wmb();
	/* we have problems freezing the queue if it's initializing */
	if (!blk_queue_bypass(q) || !blk_queue_init_done(q))
		return 0;

	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);

	spin_lock_irq(q->queue_lock);
	ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
		!blk_queue_bypass(q) || blk_queue_dying(q),
		*q->queue_lock);
	/* inc usage with the lock held to avoid a concurrent freeze_queue */
	if (!ret && !blk_queue_dying(q))
		__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
	else if (blk_queue_dying(q))
		ret = -ENODEV;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static void blk_mq_queue_exit(struct request_queue *q)
{
	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
}

static void __blk_mq_drain_queue(struct request_queue *q)
{
	while (true) {
		s64 count;

		spin_lock_irq(q->queue_lock);
		count = percpu_counter_sum(&q->mq_usage_counter);
		spin_unlock_irq(q->queue_lock);

		if (count == 0)
			break;
		blk_mq_run_queues(q, false);
		msleep(10);
	}
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
static void blk_mq_freeze_queue(struct request_queue *q)
{
	bool drain;

	spin_lock_irq(q->queue_lock);
	drain = !q->bypass_depth++;
	queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);

	if (drain)
		__blk_mq_drain_queue(q);
}

void blk_mq_drain_queue(struct request_queue *q)
{
	__blk_mq_drain_queue(q);
}

static void blk_mq_unfreeze_queue(struct request_queue *q)
{
	bool wake = false;

	spin_lock_irq(q->queue_lock);
	if (!--q->bypass_depth) {
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
		wake = true;
	}
	WARN_ON_ONCE(q->bypass_depth < 0);
	spin_unlock_irq(q->queue_lock);
	if (wake)
		wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

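/*
 * Initialize a request freshly taken from the tag pool. Everything except
 * the atomic flags and the already-assigned tag is reset here.
 */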
static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			       struct request *rq, unsigned int rw_flags)
{
	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;

	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = q;
	rq->mq_ctx = ctx;
	rq->cmd_flags |= rw_flags;
	rq->cmd_type = 0;
	/* do not touch the atomic flags; they need atomic ops against the timer */
	rq->cpu = -1;
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = NULL;
	rq->biotail = NULL;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	memset(&rq->flush, 0, max(sizeof(rq->flush), sizeof(rq->elv)));
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->ioprio = 0;
	rq->special = NULL;
	/* tag was already set */
	rq->errors = 0;
	memset(rq->__cmd, 0, sizeof(rq->__cmd));
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;

	rq->extra_len = 0;
	rq->sense_len = 0;
	rq->resid_len = 0;
	rq->sense = NULL;

	rq->deadline = 0;
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;
	rq->retries = 0;
	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}

static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
						   int rw, gfp_t gfp,
						   bool reserved)
{
	struct request *rq;

	do {
		struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
		struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);

		rq = __blk_mq_alloc_request(hctx, ctx, gfp & ~__GFP_WAIT,
						reserved);
		if (rq) {
			blk_mq_rq_ctx_init(q, ctx, rq, rw);
			break;
		}

		if (gfp & __GFP_WAIT) {
			__blk_mq_run_hw_queue(hctx);
			blk_mq_put_ctx(ctx);
		} else {
			blk_mq_put_ctx(ctx);
			break;
		}

		blk_mq_wait_for_tags(hctx, reserved);
	} while (1);

	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
{
	struct request *rq;

	if (blk_mq_queue_enter(q))
		return NULL;

	rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
	if (rq)
		blk_mq_put_ctx(rq->mq_ctx);
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
					      gfp_t gfp)
{
	struct request *rq;

	if (blk_mq_queue_enter(q))
		return NULL;

	rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
	if (rq)
		blk_mq_put_ctx(rq->mq_ctx);
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_reserved_request);

static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx, struct request *rq)
{
	const int tag = rq->tag;
	struct request_queue *q = rq->q;

	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
	blk_mq_queue_exit(q);
}

void blk_mq_free_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q = rq->q;

	ctx->rq_completed[rq_is_sync(rq)]++;

	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	__blk_mq_free_request(hctx, ctx, rq);
}

/*
 * Clone all relevant state from a request that has been put on hold in
 * the flush state machine into the preallocated flush request that hangs
 * off the request queue.
 *
 * For a driver the flush request should be invisible, that's why we are
 * impersonating the original request here.
 */
void blk_mq_clone_flush_request(struct request *flush_rq,
		struct request *orig_rq)
{
	struct blk_mq_hw_ctx *hctx =
		orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);

	flush_rq->mq_ctx = orig_rq->mq_ctx;
	flush_rq->tag = orig_rq->tag;
	memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
		hctx->cmd_size);
}

inline void __blk_mq_end_io(struct request *rq, int error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_io);

void blk_mq_end_io(struct request *rq, int error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_io(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_io);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

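/*
 * Complete the request either locally or, when the submitting ctx lives on
 * another CPU that doesn't share a cache with us (and same-CPU completions
 * are requested), via an async IPI to that CPU.
 */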
void __blk_mq_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	if (unlikely(blk_should_fake_timeout(rq->q)))
		return;
	if (!blk_mark_rq_complete(rq))
		__blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

static void blk_mq_start_request(struct request *rq, bool last)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(q, rq);

	rq->resid_len = blk_rq_bytes(rq);
	if (unlikely(blk_bidi_rq(rq)))
		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

	/*
	 * Just mark start time and set the started bit. Due to memory
	 * ordering, we know we'll see the correct deadline as long as
	 * REQ_ATOM_STARTED is seen.
	 */
	rq->deadline = jiffies + q->rq_timeout;

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears. We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}

	/*
	 * Flag the last request in the series so that drivers know when IO
	 * should be kicked off, if they don't do it on a per-request basis.
	 *
	 * Note: the flag isn't the only condition on which drivers should
	 * kick off IO. If the drive is busy, the last request might not have
	 * the bit set.
	 */
	if (last)
		rq->cmd_flags |= REQ_END;
}

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);
	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);

	rq->cmd_flags &= ~REQ_END;

	if (q->dma_drain_size && blk_rq_bytes(rq))
		rq->nr_phys_segments--;
}

void blk_mq_requeue_request(struct request *rq)
{
	__blk_mq_requeue_request(rq);
	blk_clear_rq_complete(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_insert_request(rq, true, true, false);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	return tags->rqs[tag];
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	struct blk_mq_hw_ctx *hctx;
	unsigned long *next;
	unsigned int *next_set;
};

static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
{
	struct blk_mq_timeout_data *data = __data;
	struct blk_mq_hw_ctx *hctx = data->hctx;
	unsigned int tag;

	/*
	 * The request may not be in flight yet (this is where the
	 * REQ_ATOM_STARTED flag comes in). The requests are statically
	 * allocated, so we know it's always safe to access the memory
	 * associated with a bit offset into ->rqs[].
	 */
	tag = 0;
	do {
		struct request *rq;

		tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
		if (tag >= hctx->tags->nr_tags)
			break;

		rq = blk_mq_tag_to_rq(hctx->tags, tag++);
		if (rq->q != hctx->queue)
			continue;
		if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
			continue;

		blk_rq_check_expired(rq, data->next, data->next_set);
	} while (1);
}

static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
		unsigned long *next,
		unsigned int *next_set)
{
	struct blk_mq_timeout_data data = {
		.hctx		= hctx,
		.next		= next,
		.next_set	= next_set,
	};

	/*
	 * Ask the tagging code to iterate busy requests, so we can
	 * check them for timeout.
	 */
	blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
}

static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq)
{
	struct request_queue *q = rq->q;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		return BLK_EH_NOT_HANDLED;

	if (!q->mq_ops->timeout)
		return BLK_EH_RESET_TIMER;

	return q->mq_ops->timeout(rq);
}

static void blk_mq_rq_timer(unsigned long data)
{
	struct request_queue *q = (struct request_queue *) data;
	struct blk_mq_hw_ctx *hctx;
	unsigned long next = 0;
	int i, next_set = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!hctx->nr_ctx || !hctx->tags)
			continue;

		blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);
	}

	if (next_set) {
		next = blk_rq_timeout(round_jiffies_up(next));
		mod_timer(&q->timeout, next);
	} else {
		queue_for_each_hw_ctx(q, hctx, i)
			blk_mq_tag_idle(hctx);
	}
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		int el_ret;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			if (bio_attempt_back_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			if (bio_attempt_front_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		}
	}

	return false;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct blk_mq_ctx *ctx;
	int i;

	for (i = 0; i < hctx->ctx_map.map_size; i++) {
		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
		unsigned int off, bit;

		if (!bm->word)
			continue;

		bit = 0;
		off = i * hctx->ctx_map.bits_per_word;
		do {
			bit = find_next_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			ctx = hctx->ctxs[bit + off];
			clear_bit(bit, &bm->word);
			spin_lock(&ctx->lock);
			list_splice_tail_init(&ctx->rq_list, list);
			spin_unlock(&ctx->lock);

			bit++;
		} while (1);
	}
}

/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct request *rq;
	LIST_HEAD(rq_list);
	int queued;

	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));

	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return;

	hctx->run++;

	/*
	 * Touch any software queue that has pending entries.
	 */
	flush_busy_ctxs(hctx, &rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them
	 * and stuff them at the front for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	queued = 0;
	while (!list_empty(&rq_list)) {
		int ret;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);

		blk_mq_start_request(rq, list_empty(&rq_list));

		ret = q->mq_ops->queue_rq(hctx, rq);
		switch (ret) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
			continue;
		case BLK_MQ_RQ_QUEUE_BUSY:
			list_add(&rq->queuelist, &rq_list);
			__blk_mq_requeue_request(rq);
			break;
		default:
			pr_err("blk-mq: bad return on queue: %d\n", ret);
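			/* unknown return value: fall through and fail the request */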
		case BLK_MQ_RQ_QUEUE_ERROR:
			rq->errors = -EIO;
			blk_mq_end_io(rq, rq->errors);
			break;
		}

		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
			break;
	}

	if (!queued)
		hctx->dispatched[0]++;
	else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
		hctx->dispatched[ilog2(queued) + 1]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(&rq_list)) {
		spin_lock(&hctx->lock);
		list_splice(&rq_list, &hctx->dispatch);
		spin_unlock(&hctx->lock);
	}
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = hctx->next_cpu;

	if (--hctx->next_cpu_batch <= 0) {
		int next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	return cpu;
}

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return;

	if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
		__blk_mq_run_hw_queue(hctx);
	else if (hctx->queue->nr_hw_queues == 1)
		kblockd_schedule_delayed_work(&hctx->run_work, 0);
	else {
		unsigned int cpu;

		cpu = blk_mq_hctx_next_cpu(hctx);
		kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
	}
}

void blk_mq_run_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if ((!blk_mq_hctx_has_pending(hctx) &&
		    list_empty_careful(&hctx->dispatch)) ||
		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		preempt_disable();
		blk_mq_run_hw_queue(hctx, async);
		preempt_enable();
	}
}
EXPORT_SYMBOL(blk_mq_run_queues);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);
	cancel_delayed_work(&hctx->delay_work);
	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	preempt_disable();
	__blk_mq_run_hw_queue(hctx);
	preempt_enable();
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
		preempt_disable();
		blk_mq_run_hw_queue(hctx, async);
		preempt_enable();
	}
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	__blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delay_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
		__blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	unsigned long tmo = msecs_to_jiffies(msecs);

	if (hctx->queue->nr_hw_queues == 1)
		kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
	else {
		unsigned int cpu;

		cpu = blk_mq_hctx_next_cpu(hctx);
		kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
	}
}
EXPORT_SYMBOL(blk_mq_delay_queue);

static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
				    struct request *rq, bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);

	blk_mq_hctx_mark_pending(hctx, ctx);

	/*
	 * We do this early, to ensure we are on the right CPU.
	 */
	blk_add_timer(rq);
}

void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
		bool async)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;

	current_ctx = blk_mq_get_ctx(q);
	if (!cpu_online(ctx->cpu))
		rq->mq_ctx = ctx = current_ctx;

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
		blk_insert_flush(rq);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);

	blk_mq_put_ctx(current_ctx);
}

static void blk_mq_insert_requests(struct request_queue *q,
				     struct blk_mq_ctx *ctx,
				     struct list_head *list,
				     int depth,
				     bool from_schedule)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *current_ctx;

	trace_block_unplug(q, depth, !from_schedule);

	current_ctx = blk_mq_get_ctx(q);

	if (!cpu_online(ctx->cpu))
		ctx = current_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->mq_ctx = ctx;
		__blk_mq_insert_request(hctx, rq, false);
	}
	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, from_schedule);
	blk_mq_put_ctx(current_ctx);
}

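/*
 * Sort plugged requests by software queue first, then by start sector, so
 * blk_mq_flush_plug_list() can insert each ctx's requests in one batch.
 */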
static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				blk_mq_insert_requests(this_q, this_ctx,
							&ctx_list, depth,
							from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
				       from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	init_request_from_bio(rq, bio);
	blk_account_io_start(rq, 1);
}

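/*
 * The make_request entry point for blk-mq queues: map the bio to a software
 * queue, grab a request (tag), and either plug, merge, or dispatch it.
 */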
static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	const int is_sync = rw_is_sync(bio->bi_rw);
	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
	int rw = bio_data_dir(bio);
	struct request *rq;
	unsigned int use_plug, request_count = 0;

	/*
	 * If we have multiple hardware queues, just go directly to
	 * one of those for sync IO.
	 */
	use_plug = !is_flush_fua && ((q->nr_hw_queues == 1) || !is_sync);

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_endio(bio, -EIO);
		return;
	}

	if (use_plug && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count))
		return;

	if (blk_mq_queue_enter(q)) {
		bio_endio(bio, -EIO);
		return;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	if (is_sync)
		rw |= REQ_SYNC;
	trace_block_getrq(q, bio, rw);
	rq = __blk_mq_alloc_request(hctx, ctx, GFP_ATOMIC, false);
	if (likely(rq))
		blk_mq_rq_ctx_init(q, ctx, rq, rw);
	else {
		blk_mq_put_ctx(ctx);
		trace_block_sleeprq(q, bio, rw);
		rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
							false);
		ctx = rq->mq_ctx;
		hctx = q->mq_ops->map_queue(q, ctx->cpu);
	}

	hctx->queued++;

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

	/*
	 * If a task plug exists, use it. Since this is completely lockless,
	 * utilize that to temporarily store requests until the task is
	 * either done or scheduled away.
	 */
	if (use_plug) {
		struct blk_plug *plug = current->plug;

		if (plug) {
			blk_mq_bio_to_request(rq, bio);
			if (list_empty(&plug->mq_list))
				trace_block_plug(q);
			else if (request_count >= BLK_MAX_REQUEST_COUNT) {
				blk_flush_plug_list(plug, false);
				trace_block_plug(q);
			}
			list_add_tail(&rq->queuelist, &plug->mq_list);
			blk_mq_put_ctx(ctx);
			return;
		}
	}

	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) {
		blk_mq_bio_to_request(rq, bio);
		spin_lock(&ctx->lock);
insert_rq:
		__blk_mq_insert_request(hctx, rq, false);
		spin_unlock(&ctx->lock);
	} else {
		spin_lock(&ctx->lock);
		if (!blk_mq_attempt_merge(q, ctx, bio)) {
			blk_mq_bio_to_request(rq, bio);
			goto insert_rq;
		}

		spin_unlock(&ctx->lock);
		__blk_mq_free_request(hctx, ctx, rq);
	}

	/*
	 * For a SYNC request, send it to the hardware immediately. For an
	 * ASYNC request, just ensure that we run it later on. The latter
	 * allows for merging opportunities and more efficient dispatching.
	 */
run_queue:
	blk_mq_run_hw_queue(hctx, !is_sync || is_flush_fua);
	blk_mq_put_ctx(ctx);
}

/*
 * Default mapping to a software queue, since we use one per CPU.
 */
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
EXPORT_SYMBOL(blk_mq_map_queue);

struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
						   unsigned int hctx_index)
{
	return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
				set->numa_node);
}
EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);

void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
				 unsigned int hctx_index)
{
	kfree(hctx);
}
EXPORT_SYMBOL(blk_mq_free_single_hw_queue);

static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
		struct blk_mq_tags *tags, unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			if (!tags->rqs[i])
				continue;
			set->ops->exit_request(set->driver_data, tags->rqs[i],
						hctx_idx, i);
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		__free_pages(page, page->private);
	}

	kfree(tags->rqs);

	blk_mq_free_tags(tags);
}

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

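/*
 * Allocate the tag map plus the statically preallocated requests for one
 * hardware queue. Request memory comes from higher-order pages where
 * possible, falling back to smaller orders under memory pressure.
 */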
static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
		unsigned int hctx_idx)
{
	struct blk_mq_tags *tags;
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;

	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
				set->numa_node);
	if (!tags)
		return NULL;

	INIT_LIST_HEAD(&tags->page_list);

	tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
					GFP_KERNEL, set->numa_node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * set->queue_depth;

	for (i = 0; i < set->queue_depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (left < order_to_size(this_order - 1) && this_order)
			this_order--;

		do {
			page = alloc_pages_node(set->numa_node, GFP_KERNEL,
						this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, set->queue_depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			tags->rqs[i] = p;
			if (set->ops->init_request) {
				if (set->ops->init_request(set->driver_data,
						tags->rqs[i], hctx_idx, i,
						set->numa_node))
					goto fail;
			}

			p += rq_size;
			i++;
		}
	}

	return tags;

fail:
	pr_warn("%s: failed to allocate requests\n", __func__);
	blk_mq_free_rq_map(set, tags, hctx_idx);
	return NULL;
}

static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
{
	kfree(bitmap->map);
}

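/*
 * The ctx pending bitmap is split into per-word chunks of at most
 * bits_per_word bits each, so that each chunk can live in its own
 * aligned word (see struct blk_align_bitmap) and software queues
 * hashing to different words don't bounce the same cacheline.
 */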
static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
{
	unsigned int bpw = 8, total, num_maps, i;

	bitmap->bits_per_word = bpw;

	num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
	bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
					GFP_KERNEL, node);
	if (!bitmap->map)
		return -ENOMEM;

	bitmap->map_size = num_maps;

	total = nr_cpu_ids;
	for (i = 0; i < num_maps; i++) {
		bitmap->map[i].depth = min(total, bitmap->bits_per_word);
		total -= bitmap->map[i].depth;
	}

	return 0;
}

static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	/*
	 * Move ctx entries to new CPU, if this one is going away.
	 */
	ctx = __blk_mq_get_ctx(q, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return NOTIFY_OK;

	ctx = blk_mq_get_ctx(q);
	spin_lock(&ctx->lock);

	while (!list_empty(&tmp)) {
		struct request *rq;

		rq = list_first_entry(&tmp, struct request, queuelist);
		rq->mq_ctx = ctx;
		list_move_tail(&rq->queuelist, &ctx->rq_list);
	}

	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	blk_mq_hctx_mark_pending(hctx, ctx);

	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	blk_mq_put_ctx(ctx);
	return NOTIFY_OK;
}

static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_tag_set *set = q->tag_set;

	if (set->tags[hctx->queue_num])
		return NOTIFY_OK;

	set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
	if (!set->tags[hctx->queue_num])
		return NOTIFY_STOP;

	hctx->tags = set->tags[hctx->queue_num];
	return NOTIFY_OK;
}

static int blk_mq_hctx_notify(void *data, unsigned long action,
			      unsigned int cpu)
{
	struct blk_mq_hw_ctx *hctx = data;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		return blk_mq_hctx_cpu_offline(hctx, cpu);
	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		return blk_mq_hctx_cpu_online(hctx, cpu);

	return NOTIFY_OK;
}

static int blk_mq_init_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i, j;

	/*
	 * Initialize hardware queues
	 */
	queue_for_each_hw_ctx(q, hctx, i) {
		int node;

		node = hctx->numa_node;
		if (node == NUMA_NO_NODE)
			node = hctx->numa_node = set->numa_node;

		INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
		INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
		spin_lock_init(&hctx->lock);
		INIT_LIST_HEAD(&hctx->dispatch);
		hctx->queue = q;
		hctx->queue_num = i;
		hctx->flags = set->flags;
		hctx->cmd_size = set->cmd_size;

		blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
						blk_mq_hctx_notify, hctx);
		blk_mq_register_cpu_notifier(&hctx->cpu_notifier);

		hctx->tags = set->tags[i];

		/*
		 * Allocate space for all possible cpus to avoid allocation
		 * at runtime
		 */
		hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
						GFP_KERNEL, node);
		if (!hctx->ctxs)
			break;

		if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
			break;

		hctx->nr_ctx = 0;

		if (set->ops->init_hctx &&
		    set->ops->init_hctx(hctx, set->driver_data, i))
			break;
	}

	if (i == q->nr_hw_queues)
		return 0;

	/*
	 * Init failed
	 */
	queue_for_each_hw_ctx(q, hctx, j) {
		if (i == j)
			break;

		if (set->ops->exit_hctx)
			set->ops->exit_hctx(hctx, j);

		blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
		kfree(hctx->ctxs);
		blk_mq_free_bitmap(&hctx->ctx_map);
	}

	return 1;
}

static void blk_mq_init_cpu_queues(struct request_queue *q,
				   unsigned int nr_hw_queues)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
		struct blk_mq_hw_ctx *hctx;

		memset(__ctx, 0, sizeof(*__ctx));
		__ctx->cpu = i;
		spin_lock_init(&__ctx->lock);
		INIT_LIST_HEAD(&__ctx->rq_list);
		__ctx->queue = q;

		/* If the cpu isn't online, the cpu is mapped to first hctx */
		if (!cpu_online(i))
			continue;

		hctx = q->mq_ops->map_queue(q, i);
		cpumask_set_cpu(i, hctx->cpumask);
		hctx->nr_ctx++;

		/*
		 * Set local node, IFF we have more than one hw queue. If
		 * not, we remain on the home node of the device
		 */
		if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
			hctx->numa_node = cpu_to_node(i);
	}
}

static void blk_mq_map_swqueue(struct request_queue *q)
{
	unsigned int i;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;

	queue_for_each_hw_ctx(q, hctx, i) {
		cpumask_clear(hctx->cpumask);
		hctx->nr_ctx = 0;
	}

	/*
	 * Map software to hardware queues
	 */
	queue_for_each_ctx(q, ctx, i) {
		/* If the cpu isn't online, the cpu is mapped to first hctx */
		if (!cpu_online(i))
			continue;

		hctx = q->mq_ops->map_queue(q, i);
		cpumask_set_cpu(i, hctx->cpumask);
		ctx->index_hw = hctx->nr_ctx;
		hctx->ctxs[hctx->nr_ctx++] = ctx;
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		/*
		 * If no software queues are mapped to this hardware queue,
		 * disable it and free the request entries
		 */
		if (!hctx->nr_ctx) {
			struct blk_mq_tag_set *set = q->tag_set;

			if (set->tags[i]) {
				blk_mq_free_rq_map(set, set->tags[i], i);
				set->tags[i] = NULL;
				hctx->tags = NULL;
			}
			continue;
		}

		/*
		 * Initialize batch roundrobin counts
		 */
		hctx->next_cpu = cpumask_first(hctx->cpumask);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}
}

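/*
 * When more than one queue shares this tag set, flag every hctx as
 * BLK_MQ_F_TAG_SHARED so tag allocation can be balanced across the
 * sharing queues; each queue is frozen while its flags are flipped.
 */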
static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	bool shared;
	int i;

	if (set->tag_list.next == set->tag_list.prev)
		shared = false;
	else
		shared = true;

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		blk_mq_freeze_queue(q);

		queue_for_each_hw_ctx(q, hctx, i) {
			if (shared)
				hctx->flags |= BLK_MQ_F_TAG_SHARED;
			else
				hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
		}
		blk_mq_unfreeze_queue(q);
	}
}

1596static void blk_mq_del_queue_tag_set(struct request_queue *q)
1597{
1598 struct blk_mq_tag_set *set = q->tag_set;
1599
1600 blk_mq_freeze_queue(q);
1601
1602 mutex_lock(&set->tag_list_lock);
1603 list_del_init(&q->tag_set_list);
1604 blk_mq_update_tag_set_depth(set);
1605 mutex_unlock(&set->tag_list_lock);
1606
1607 blk_mq_unfreeze_queue(q);
1608}
1609
1610static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1611 struct request_queue *q)
1612{
1613 q->tag_set = set;
1614
1615 mutex_lock(&set->tag_list_lock);
1616 list_add_tail(&q->tag_set_list, &set->tag_list);
1617 blk_mq_update_tag_set_depth(set);
1618 mutex_unlock(&set->tag_list_lock);
1619}
1620
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001621struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
Jens Axboe320ae512013-10-24 09:20:05 +01001622{
1623 struct blk_mq_hw_ctx **hctxs;
1624 struct blk_mq_ctx *ctx;
1625 struct request_queue *q;
1626 int i;
1627
Jens Axboe320ae512013-10-24 09:20:05 +01001628 ctx = alloc_percpu(struct blk_mq_ctx);
1629 if (!ctx)
1630 return ERR_PTR(-ENOMEM);
1631
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001632 hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1633 set->numa_node);
Jens Axboe320ae512013-10-24 09:20:05 +01001634
1635 if (!hctxs)
1636 goto err_percpu;
1637
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001638 for (i = 0; i < set->nr_hw_queues; i++) {
1639 hctxs[i] = set->ops->alloc_hctx(set, i);
Jens Axboe320ae512013-10-24 09:20:05 +01001640 if (!hctxs[i])
1641 goto err_hctxs;
1642
Jens Axboee4043dc2014-04-09 10:18:23 -06001643 if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
1644 goto err_hctxs;
1645
Jens Axboe0d2602c2014-05-13 15:10:52 -06001646 atomic_set(&hctxs[i]->nr_active, 0);
Jens Axboe320ae512013-10-24 09:20:05 +01001647 hctxs[i]->numa_node = NUMA_NO_NODE;
1648 hctxs[i]->queue_num = i;
1649 }
1650
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001651 q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
Jens Axboe320ae512013-10-24 09:20:05 +01001652 if (!q)
1653 goto err_hctxs;
1654
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001655 q->mq_map = blk_mq_make_queue_map(set);
Jens Axboe320ae512013-10-24 09:20:05 +01001656 if (!q->mq_map)
1657 goto err_map;
1658
1659 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
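	/* editor's note: 30000 ms = 30 s default; set->timeout, if set, overrides this below */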
1660 blk_queue_rq_timeout(q, 30000);
1661
1662 q->nr_queues = nr_cpu_ids;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001663 q->nr_hw_queues = set->nr_hw_queues;
Jens Axboe320ae512013-10-24 09:20:05 +01001664
1665 q->queue_ctx = ctx;
1666 q->queue_hw_ctx = hctxs;
1667
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001668 q->mq_ops = set->ops;
Jens Axboe94eddfb2013-11-19 09:25:07 -07001669 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
Jens Axboe320ae512013-10-24 09:20:05 +01001670
Christoph Hellwig1be036e2014-02-07 10:22:39 -08001671 q->sg_reserved_size = INT_MAX;
1672
Jens Axboe320ae512013-10-24 09:20:05 +01001673 blk_queue_make_request(q, blk_mq_make_request);
Jens Axboe87ee7b12014-04-24 08:51:47 -06001674 blk_queue_rq_timed_out(q, blk_mq_rq_timed_out);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001675 if (set->timeout)
1676 blk_queue_rq_timeout(q, set->timeout);
Jens Axboe320ae512013-10-24 09:20:05 +01001677
Jens Axboeeba71762014-05-20 15:17:27 -06001678 /*
1679 * Do this after blk_queue_make_request() overrides it...
1680 */
1681 q->nr_requests = set->queue_depth;
1682
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001683 if (set->ops->complete)
1684 blk_queue_softirq_done(q, set->ops->complete);
Christoph Hellwig30a91cb2014-02-10 03:24:38 -08001685
Jens Axboe320ae512013-10-24 09:20:05 +01001686 blk_mq_init_flush(q);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001687 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
Jens Axboe320ae512013-10-24 09:20:05 +01001688
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001689 q->flush_rq = kzalloc(round_up(sizeof(struct request) +
1690 set->cmd_size, cache_line_size()),
1691 GFP_KERNEL);
Christoph Hellwig18741982014-02-10 09:29:00 -07001692 if (!q->flush_rq)
Jens Axboe320ae512013-10-24 09:20:05 +01001693 goto err_hw;
1694
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001695 if (blk_mq_init_hw_queues(q, set))
Christoph Hellwig18741982014-02-10 09:29:00 -07001696 goto err_flush_rq;
1697
Jens Axboe320ae512013-10-24 09:20:05 +01001698 mutex_lock(&all_q_mutex);
1699 list_add_tail(&q->all_q_node, &all_q_list);
1700 mutex_unlock(&all_q_mutex);
1701
Jens Axboe0d2602c2014-05-13 15:10:52 -06001702 blk_mq_add_queue_tag_set(set, q);
1703
Jens Axboe484b4062014-05-21 14:01:15 -06001704 blk_mq_map_swqueue(q);
1705
Jens Axboe320ae512013-10-24 09:20:05 +01001706 return q;
Christoph Hellwig18741982014-02-10 09:29:00 -07001707
1708err_flush_rq:
1709 kfree(q->flush_rq);
Jens Axboe320ae512013-10-24 09:20:05 +01001710err_hw:
1711 kfree(q->mq_map);
1712err_map:
1713 blk_cleanup_queue(q);
1714err_hctxs:
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001715 for (i = 0; i < set->nr_hw_queues; i++) {
Jens Axboe320ae512013-10-24 09:20:05 +01001716 if (!hctxs[i])
1717 break;
Jens Axboee4043dc2014-04-09 10:18:23 -06001718 free_cpumask_var(hctxs[i]->cpumask);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001719 set->ops->free_hctx(hctxs[i], i);
Jens Axboe320ae512013-10-24 09:20:05 +01001720 }
1721 kfree(hctxs);
1722err_percpu:
1723 free_percpu(ctx);
1724 return ERR_PTR(-ENOMEM);
1725}
1726EXPORT_SYMBOL(blk_mq_init_queue);
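
/*
 * Editor's sketch, not part of this file: blk_mq_init_queue() reports
 * failure via ERR_PTR() rather than NULL, so callers are expected to test
 * the result with IS_ERR(). "my_create_queue" is a hypothetical name; the
 * tag set must already have been set up with blk_mq_alloc_tag_set()
 * (see below).
 */
static struct request_queue *my_create_queue(struct blk_mq_tag_set *my_set)
{
	struct request_queue *q = blk_mq_init_queue(my_set);

	if (IS_ERR(q))
		return NULL;	/* or propagate PTR_ERR(q) to the caller */
	return q;
}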
1727
1728void blk_mq_free_queue(struct request_queue *q)
1729{
1730 struct blk_mq_hw_ctx *hctx;
1731 int i;
1732
Jens Axboe0d2602c2014-05-13 15:10:52 -06001733 blk_mq_del_queue_tag_set(q);
1734
Jens Axboe320ae512013-10-24 09:20:05 +01001735 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboe320ae512013-10-24 09:20:05 +01001736 kfree(hctx->ctxs);
Jens Axboe320ae512013-10-24 09:20:05 +01001737 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1738 if (q->mq_ops->exit_hctx)
1739 q->mq_ops->exit_hctx(hctx, i);
Jens Axboee4043dc2014-04-09 10:18:23 -06001740 free_cpumask_var(hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01001741 q->mq_ops->free_hctx(hctx, i);
1742 }
1743
1744 free_percpu(q->queue_ctx);
1745 kfree(q->queue_hw_ctx);
1746 kfree(q->mq_map);
1747
1748 q->queue_ctx = NULL;
1749 q->queue_hw_ctx = NULL;
1750 q->mq_map = NULL;
1751
1752 mutex_lock(&all_q_mutex);
1753 list_del_init(&q->all_q_node);
1754 mutex_unlock(&all_q_mutex);
1755}
Jens Axboe320ae512013-10-24 09:20:05 +01001756
1757/* Basically redo blk_mq_init_queue with queue frozen */
Paul Gortmakerf618ef72013-11-14 08:26:02 -07001758static void blk_mq_queue_reinit(struct request_queue *q)
Jens Axboe320ae512013-10-24 09:20:05 +01001759{
1760 blk_mq_freeze_queue(q);
1761
1762 blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1763
1764 /*
1765	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1766	 * we should change hctx->numa_node according to the new topology (this
1767	 * involves freeing and re-allocating memory; is it worth doing?)
1768 */
1769
1770 blk_mq_map_swqueue(q);
1771
1772 blk_mq_unfreeze_queue(q);
1773}
1774
Paul Gortmakerf618ef72013-11-14 08:26:02 -07001775static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1776 unsigned long action, void *hcpu)
Jens Axboe320ae512013-10-24 09:20:05 +01001777{
1778 struct request_queue *q;
1779
1780 /*
Jens Axboe9fccfed2014-05-08 14:50:19 -06001781	 * Before new mappings are established, a hotadded cpu might already
1782	 * have started handling requests. This doesn't break anything, as we
1783	 * map offline CPUs to the first hardware queue. We will re-init the
1784	 * queues below to get optimal settings.
Jens Axboe320ae512013-10-24 09:20:05 +01001785 */
1786 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1787 action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1788 return NOTIFY_OK;
1789
1790 mutex_lock(&all_q_mutex);
1791 list_for_each_entry(q, &all_q_list, all_q_node)
1792 blk_mq_queue_reinit(q);
1793 mutex_unlock(&all_q_mutex);
1794 return NOTIFY_OK;
1795}
1796
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001797int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
1798{
1799 int i;
1800
1801 if (!set->nr_hw_queues)
1802 return -EINVAL;
1803 if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH)
1804 return -EINVAL;
1805 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
1806 return -EINVAL;
1807
1808	if (!set->ops->queue_rq || !set->ops->map_queue ||
1809	    !set->ops->alloc_hctx || !set->ops->free_hctx)
1810		return -EINVAL;
1811
Ming Lei48479002014-04-19 18:00:17 +08001814 set->tags = kmalloc_node(set->nr_hw_queues *
1815 sizeof(struct blk_mq_tags *),
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001816 GFP_KERNEL, set->numa_node);
1817 if (!set->tags)
1818 goto out;
1819
1820 for (i = 0; i < set->nr_hw_queues; i++) {
1821 set->tags[i] = blk_mq_init_rq_map(set, i);
1822 if (!set->tags[i])
1823 goto out_unwind;
1824 }
1825
Jens Axboe0d2602c2014-05-13 15:10:52 -06001826 mutex_init(&set->tag_list_lock);
1827 INIT_LIST_HEAD(&set->tag_list);
1828
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001829 return 0;
1830
1831out_unwind:
1832 while (--i >= 0)
1833 blk_mq_free_rq_map(set, set->tags[i], i);
1834out:
1835 return -ENOMEM;
1836}
1837EXPORT_SYMBOL(blk_mq_alloc_tag_set);
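
/*
 * Editor's sketch, not part of this file: a minimal tag-set setup as a
 * driver of this era might perform it before calling blk_mq_init_queue().
 * All "my_*" names and the chosen depth are hypothetical; map_queue and
 * alloc_hctx/free_hctx point at the generic helpers exported by blk-mq,
 * and the queue_rq handler completes every request at once, null_blk-style.
 */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	blk_mq_end_io(rq, 0);		/* complete immediately, no real I/O */
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
	.free_hctx	= blk_mq_free_single_hw_queue,
};

static struct blk_mq_tag_set my_set;

static int my_setup_tags(void)
{
	memset(&my_set, 0, sizeof(my_set));
	my_set.ops		= &my_mq_ops;
	my_set.nr_hw_queues	= 1;
	my_set.queue_depth	= 64;	/* must not exceed BLK_MQ_MAX_DEPTH */
	my_set.numa_node	= NUMA_NO_NODE;
	my_set.cmd_size		= 0;	/* no per-request driver payload */

	return blk_mq_alloc_tag_set(&my_set);
}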
1838
1839void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
1840{
1841 int i;
1842
Jens Axboe484b4062014-05-21 14:01:15 -06001843 for (i = 0; i < set->nr_hw_queues; i++) {
1844 if (set->tags[i])
1845 blk_mq_free_rq_map(set, set->tags[i], i);
1846 }
1847
Ming Lei981bd182014-04-24 00:07:34 +08001848 kfree(set->tags);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001849}
1850EXPORT_SYMBOL(blk_mq_free_tag_set);
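
/*
 * Editor's sketch, not part of this file: teardown mirrors setup in
 * reverse. blk_cleanup_queue() ends up in blk_mq_free_queue() for mq
 * queues, and only then may the tag set itself be released. "my_set" is
 * the hypothetical set from the setup sketch above.
 */
static void my_teardown(struct request_queue *q)
{
	/* the queue must be gone before the tag set backing it is freed */
	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&my_set);
}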
1851
Jens Axboee3a2b3f2014-05-20 11:49:02 -06001852int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
1853{
1854 struct blk_mq_tag_set *set = q->tag_set;
1855 struct blk_mq_hw_ctx *hctx;
1856 int i, ret;
1857
1858 if (!set || nr > set->queue_depth)
1859 return -EINVAL;
1860
1861 ret = 0;
1862 queue_for_each_hw_ctx(q, hctx, i) {
1863 ret = blk_mq_tag_update_depth(hctx->tags, nr);
1864 if (ret)
1865 break;
1866 }
1867
1868 if (!ret)
1869 q->nr_requests = nr;
1870
1871 return ret;
1872}
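
/*
 * Editor's note (an assumption based on the matching blk-sysfs.c change,
 * not verified against this tree): the function above backs writes to
 * /sys/block/<dev>/queue/nr_requests for mq devices, e.g.
 *
 *	echo 32 > /sys/block/nullb0/queue/nr_requests
 *
 * which lands here with nr == 32 and is rejected when it does not fit the
 * depth the tag set was originally allocated with.
 */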
1873
Jens Axboe676141e2014-03-20 13:29:18 -06001874void blk_mq_disable_hotplug(void)
1875{
1876 mutex_lock(&all_q_mutex);
1877}
1878
1879void blk_mq_enable_hotplug(void)
1880{
1881 mutex_unlock(&all_q_mutex);
1882}
1883
Jens Axboe320ae512013-10-24 09:20:05 +01001884static int __init blk_mq_init(void)
1885{
Jens Axboe320ae512013-10-24 09:20:05 +01001886 blk_mq_cpu_init();
1887
1888 /* Must be called after percpu_counter_hotcpu_callback() */
1889 hotcpu_notifier(blk_mq_queue_reinit_notify, -10);
1890
1891 return 0;
1892}
1893subsys_initcall(blk_mq_init);