/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return sbitmap_any_bit_set(&hctx->ctx_map) ||
		!list_empty_careful(&hctx->dispatch) ||
		blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

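/*
 * Bump the freeze depth; on the first freeze, kill q_usage_counter so new
 * callers of blk_queue_enter() block, and run the hardware queues so
 * already-queued work can drain.
 */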
void blk_mq_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
				  percpu_ref_is_zero(&q->q_usage_counter),
				  timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero. For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_mq_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Additionally, new queue_rq() calls are not
 * prevented unless the queue has been stopped first.
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_stop_hw_queues(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(&hctx->queue_rq_srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);

	/*
	 * If we are called because the queue has now been marked as
	 * dying, we need to ensure that processes currently waiting on
	 * the queue are notified as well.
	 */
	wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

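/*
 * Initialize a freshly allocated request for the given software queue:
 * set the queue/ctx back pointers, the operation flags, timestamps and
 * the per-ctx dispatch statistics.
 */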
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op)
{
	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = q;
	rq->mq_ctx = ctx;
	rq->cmd_flags = op;
	if (blk_queue_io_stat(q))
		rq->rq_flags |= RQF_IO_STAT;
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->errors = 0;
	rq->extra_len = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	ctx->rq_dispatched[op_is_sync(op)]++;
}
EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);

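/*
 * Grab a tag and turn the matching pre-allocated request into a live one.
 * With BLK_MQ_REQ_INTERNAL the tag is a scheduler tag (rq->internal_tag);
 * otherwise it is a driver tag and the request is published in
 * hctx->tags->rqs[].
 */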
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
				       unsigned int op)
{
	struct request *rq;
	unsigned int tag;

	tag = blk_mq_get_tag(data);
	if (tag != BLK_MQ_TAG_FAIL) {
		struct blk_mq_tags *tags = blk_mq_tags_from_data(data);

		rq = tags->static_rqs[tag];

		if (data->flags & BLK_MQ_REQ_INTERNAL) {
			rq->tag = -1;
			rq->internal_tag = tag;
		} else {
			if (blk_mq_tag_busy(data->hctx)) {
				rq->rq_flags = RQF_MQ_INFLIGHT;
				atomic_inc(&data->hctx->nr_active);
			}
			rq->tag = tag;
			rq->internal_tag = -1;
			data->hctx->tags->rqs[rq->tag] = rq;
		}

		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
		return rq;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);

struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

	blk_mq_put_ctx(alloc_data.ctx);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
		unsigned int flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	unsigned int cpu;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, true);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
		blk_queue_exit(q);
		return ERR_PTR(-EXDEV);
	}
	cpu = cpumask_first(alloc_data.hctx->cpumask);
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

	blk_mq_put_ctx(alloc_data.ctx);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			     struct request *rq)
{
	const int sched_tag = rq->internal_tag;
	struct request_queue *q = rq->q;

	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	wbt_done(q->rq_wb, &rq->issue_stat);
	rq->rq_flags = 0;

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
	if (rq->tag != -1)
		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_sched_completed_request(hctx, rq);
	blk_mq_sched_restart_queues(hctx);
	blk_queue_exit(q);
}

static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	ctx->rq_completed[rq_is_sync(rq)]++;
	__blk_mq_finish_request(hctx, ctx, rq);
}

void blk_mq_finish_request(struct request *rq)
{
	blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}

void blk_mq_free_request(struct request *rq)
{
	blk_mq_sched_put_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, int error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		wbt_done(rq->q->rq_wb, &rq->issue_stat);
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, int error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

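/*
 * Complete the request on the CPU that submitted it: if QUEUE_FLAG_SAME_COMP
 * is set and the submitting CPU is remote (and does not share a cache with
 * us, unless SAME_FORCE is also set), punt the completion there via an IPI;
 * otherwise run softirq_done_fn() locally.
 */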
static void blk_mq_ipi_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

static void blk_mq_stat_add(struct request *rq)
{
	if (rq->rq_flags & RQF_STATS) {
		/*
		 * We could use rq->mq_ctx here, but there's less of a risk
		 * of races if we have the completion event add the stats
		 * to the local software queue.
		 */
		struct blk_mq_ctx *ctx;

		ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
		blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
	}
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_stat_add(rq);

	if (!q->softirq_done_fn)
		blk_mq_end_request(rq, rq->errors);
	else
		blk_mq_ipi_complete_request(rq);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq)) {
		rq->errors = error;
		__blk_mq_complete_request(rq);
	}
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		blk_stat_set_issue_time(&rq->issue_stat);
		rq->rq_flags |= RQF_STATS;
		wbt_issue(q->rq_wb, &rq->issue_stat);
	}

	blk_add_timer(rq);

	/*
	 * Ensure that ->deadline is visible before we set the started
	 * flag and clear the completed flag.
	 */
	smp_mb__before_atomic();

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears.  We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, &rq->issue_stat);
	blk_mq_sched_requeue_request(rq);

	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

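/*
 * Work handler that drains q->requeue_list: requests flagged with
 * RQF_SOFTBARRIER are re-inserted at the head of their queue, everything
 * else at the tail, and the hardware queues are then re-run.
 */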
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & RQF_SOFTBARRIER))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, true, false, false, true);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false, true);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_schedule_delayed_work(&q->requeue_work,
				      msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

void blk_mq_abort_requeue_list(struct request_queue *q)
{
	unsigned long flags;
	LIST_HEAD(rq_list);

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	while (!list_empty(&rq_list)) {
		struct request *rq;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
	}
}
EXPORT_SYMBOL(blk_mq_abort_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	const struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

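/*
 * Per-request callback for blk_mq_queue_tag_busy_iter(): times out started
 * requests whose deadline has passed and tracks the earliest remaining
 * deadline so the timeout timer can be re-armed; unstarted requests on a
 * dying queue are failed with -EIO.
 */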
static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;

	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		/*
		 * If a request wasn't started before the queue was
		 * marked dying, kill it here or it'll go unnoticed.
		 */
		if (unlikely(blk_queue_dying(rq->q))) {
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
		}
		return;
	}

	if (time_after_eq(jiffies, rq->deadline)) {
		if (!blk_mark_rq_complete(rq))
			blk_mq_rq_timed_out(rq, reserved);
	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
		data->next = rq->deadline;
		data->next_set = 1;
	}
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_mq_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		struct blk_mq_hw_ctx *hctx;

		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		if (merged)
			ctx->rq_merged++;
		return merged;
	}

	return false;
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	sbitmap_clear_bit(sb, bitnr);
	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

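/*
 * Map a dispatch batch size to its log2 bucket, used to index the
 * hctx->dispatched[] histogram.
 */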
static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

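/*
 * Make sure @rq owns a driver tag before it is handed to ->queue_rq().
 * Requests allocated through a scheduler have rq->tag == -1 here and need
 * a (possibly reserved) driver tag from the hardware queue; on success the
 * request is published in tags->rqs[].
 */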
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait)
{
	struct blk_mq_alloc_data data = {
		.q = rq->q,
		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
	};

	if (rq->tag != -1) {
done:
		if (hctx)
			*hctx = data.hctx;
		return true;
	}

	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
		data.flags |= BLK_MQ_REQ_RESERVED;

	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag >= 0) {
		if (blk_mq_tag_busy(data.hctx)) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&data.hctx->nr_active);
		}
		data.hctx->tags->rqs[rq->tag] = rq;
		goto done;
	}

	return false;
}

static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
				    struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

/*
 * If we fail getting a driver tag because all the driver tags are already
 * assigned and on the dispatch list, BUT the first entry does not have a
 * tag, then we could deadlock. For that case, move entries with assigned
 * driver tags to the front, leaving the set of tagged requests in the
 * same order, and the untagged set in the same order.
 */
static bool reorder_tags_to_front(struct list_head *list)
{
	struct request *rq, *tmp, *first = NULL;

	list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
		if (rq == first)
			break;
		if (rq->tag != -1) {
			list_move(&rq->queuelist, list);
			if (!first)
				first = rq;
		}
	}

	return first != NULL;
}

static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
				void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	list_del(&wait->task_list);
	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
{
	struct sbq_wait_state *ws;

	/*
	 * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
	 * The thread which wins the race to grab this bit adds the hardware
	 * queue to the wait queue.
	 */
	if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
	    test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
		return false;

	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);

	/*
	 * As soon as this returns, it's no longer safe to fiddle with
	 * hctx->dispatch_wait, since a completion can wake up the wait queue
	 * and unlock the bit.
	 */
	add_wait_queue(&ws->wait, &hctx->dispatch_wait);
	return true;
}

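/*
 * Issue the requests on @list to the driver; returns true if anything was
 * queued. Requests the driver reports as busy, or that we cannot get a
 * driver tag for, are parked on hctx->dispatch and the hardware queue is
 * re-run later, either by a queue restart or by the TAG_WAITING wakeup.
 */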
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct request_queue *q = hctx->queue;
	struct request *rq;
	LIST_HEAD(driver_list);
	struct list_head *dptr;
	int queued, ret = BLK_MQ_RQ_QUEUE_OK;

	/*
	 * Start off with dptr being NULL, so we start the first request
	 * immediately, even if we have more pending.
	 */
	dptr = NULL;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	queued = 0;
	while (!list_empty(list)) {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);
		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
			if (!queued && reorder_tags_to_front(list))
				continue;

			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed.
			 */
			if (blk_mq_dispatch_wait_add(hctx)) {
				/*
				 * It's possible that a tag was freed in the
				 * window between the allocation failure and
				 * adding the hardware queue to the wait queue.
				 */
				if (!blk_mq_get_driver_tag(rq, &hctx, false))
					break;
			} else {
				break;
			}
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;
		bd.list = dptr;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			struct request *nxt;

			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
			break;
		case BLK_MQ_RQ_QUEUE_BUSY:
			blk_mq_put_driver_tag_hctx(hctx, rq);
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		default:
			pr_err("blk-mq: bad return on queue: %d\n", ret);
		case BLK_MQ_RQ_QUEUE_ERROR:
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
			break;
		}

		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
			break;

		/*
		 * We've done the first request. If we have more than 1
		 * left in the list, set dptr to defer issue.
		 */
		if (!dptr && list->next != list->prev)
			dptr = &driver_list;
	}

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		/*
		 * If we got a driver tag for the next request already,
		 * free it again.
		 */
		rq = list_first_entry(list, struct request, queuelist);
		blk_mq_put_driver_tag(rq);

		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * The queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY,
		 * but it's possible the queue is stopped and restarted again
		 * before this. Queue restart will dispatch requests. And since
		 * requests in rq_list aren't added into hctx->dispatch yet,
		 * the requests in rq_list might get lost.
		 *
		 * blk_mq_run_hw_queue() already checks the STOPPED bit
		 *
		 * If RESTART or TAG_WAITING is set, then let completion restart
		 * the queue instead of potentially looping here.
		 */
		if (!blk_mq_sched_needs_restart(hctx) &&
		    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
			blk_mq_run_hw_queue(hctx, true);
	}

	return queued != 0;
}

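/*
 * Run the dispatch for one hardware queue. Queues flagged BLK_MQ_F_BLOCKING
 * (their ->queue_rq() may sleep) are protected by SRCU, everything else by
 * plain RCU, matching the synchronization done in blk_mq_quiesce_queue().
 */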
Bart Van Assche6a83e742016-11-02 10:09:51 -06001103static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1104{
1105 int srcu_idx;
1106
1107 WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1108 cpu_online(hctx->next_cpu));
1109
1110 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1111 rcu_read_lock();
Jens Axboebd166ef2017-01-17 06:03:22 -07001112 blk_mq_sched_dispatch_requests(hctx);
Bart Van Assche6a83e742016-11-02 10:09:51 -06001113 rcu_read_unlock();
1114 } else {
1115 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
Jens Axboebd166ef2017-01-17 06:03:22 -07001116 blk_mq_sched_dispatch_requests(hctx);
Bart Van Assche6a83e742016-11-02 10:09:51 -06001117 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
1118 }
1119}
1120
Jens Axboe506e9312014-05-07 10:26:44 -06001121/*
1122 * It'd be great if the workqueue API had a way to pass
1123 * in a mask and had some smarts for more clever placement.
1124 * For now we just round-robin here, switching for every
1125 * BLK_MQ_CPU_WORK_BATCH queued items.
1126 */
1127static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1128{
Christoph Hellwigb657d7e2014-11-24 09:27:23 +01001129 if (hctx->queue->nr_hw_queues == 1)
1130 return WORK_CPU_UNBOUND;
Jens Axboe506e9312014-05-07 10:26:44 -06001131
1132 if (--hctx->next_cpu_batch <= 0) {
Gabriel Krisman Bertazic02ebfd2016-09-28 00:24:24 -03001133 int next_cpu;
Jens Axboe506e9312014-05-07 10:26:44 -06001134
1135 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
1136 if (next_cpu >= nr_cpu_ids)
1137 next_cpu = cpumask_first(hctx->cpumask);
1138
1139 hctx->next_cpu = next_cpu;
1140 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1141 }
1142
Christoph Hellwigb657d7e2014-11-24 09:27:23 +01001143 return hctx->next_cpu;
Jens Axboe506e9312014-05-07 10:26:44 -06001144}
1145
Jens Axboe320ae512013-10-24 09:20:05 +01001146void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1147{
Bart Van Assche5d1b25c2016-10-28 17:19:15 -07001148 if (unlikely(blk_mq_hctx_stopped(hctx) ||
1149 !blk_mq_hw_queue_mapped(hctx)))
Jens Axboe320ae512013-10-24 09:20:05 +01001150 return;
1151
Jens Axboe1b792f22016-09-21 10:12:13 -06001152 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
Paolo Bonzini2a90d4a2014-11-07 23:04:00 +01001153 int cpu = get_cpu();
1154 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
Paolo Bonzini398205b2014-11-07 23:03:59 +01001155 __blk_mq_run_hw_queue(hctx);
Paolo Bonzini2a90d4a2014-11-07 23:04:00 +01001156 put_cpu();
Paolo Bonzini398205b2014-11-07 23:03:59 +01001157 return;
1158 }
Jens Axboee4043dc2014-04-09 10:18:23 -06001159
Paolo Bonzini2a90d4a2014-11-07 23:04:00 +01001160 put_cpu();
Jens Axboee4043dc2014-04-09 10:18:23 -06001161 }
Paolo Bonzini398205b2014-11-07 23:03:59 +01001162
Jens Axboe27489a32016-08-24 15:54:25 -06001163 kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
Jens Axboe320ae512013-10-24 09:20:05 +01001164}
1165
Mike Snitzerb94ec292015-03-11 23:56:38 -04001166void blk_mq_run_hw_queues(struct request_queue *q, bool async)
Jens Axboe320ae512013-10-24 09:20:05 +01001167{
1168 struct blk_mq_hw_ctx *hctx;
1169 int i;
1170
1171 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001172 if (!blk_mq_hctx_has_pending(hctx) ||
Bart Van Assche5d1b25c2016-10-28 17:19:15 -07001173 blk_mq_hctx_stopped(hctx))
Jens Axboe320ae512013-10-24 09:20:05 +01001174 continue;
1175
Mike Snitzerb94ec292015-03-11 23:56:38 -04001176 blk_mq_run_hw_queue(hctx, async);
Jens Axboe320ae512013-10-24 09:20:05 +01001177 }
1178}
Mike Snitzerb94ec292015-03-11 23:56:38 -04001179EXPORT_SYMBOL(blk_mq_run_hw_queues);
Jens Axboe320ae512013-10-24 09:20:05 +01001180
Bart Van Asschefd001442016-10-28 17:19:37 -07001181/**
1182 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1183 * @q: request queue.
1184 *
1185 * The caller is responsible for serializing this function against
1186 * blk_mq_{start,stop}_hw_queue().
1187 */
1188bool blk_mq_queue_stopped(struct request_queue *q)
1189{
1190 struct blk_mq_hw_ctx *hctx;
1191 int i;
1192
1193 queue_for_each_hw_ctx(q, hctx, i)
1194 if (blk_mq_hctx_stopped(hctx))
1195 return true;
1196
1197 return false;
1198}
1199EXPORT_SYMBOL(blk_mq_queue_stopped);
1200
Jens Axboe320ae512013-10-24 09:20:05 +01001201void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1202{
Jens Axboe27489a32016-08-24 15:54:25 -06001203 cancel_work(&hctx->run_work);
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001204 cancel_delayed_work(&hctx->delay_work);
Jens Axboe320ae512013-10-24 09:20:05 +01001205 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1206}
1207EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1208
Christoph Hellwig280d45f2013-10-25 14:45:58 +01001209void blk_mq_stop_hw_queues(struct request_queue *q)
1210{
1211 struct blk_mq_hw_ctx *hctx;
1212 int i;
1213
1214 queue_for_each_hw_ctx(q, hctx, i)
1215 blk_mq_stop_hw_queue(hctx);
1216}
1217EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1218
Jens Axboe320ae512013-10-24 09:20:05 +01001219void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1220{
1221 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
Jens Axboee4043dc2014-04-09 10:18:23 -06001222
Jens Axboe0ffbce82014-06-25 08:22:34 -06001223 blk_mq_run_hw_queue(hctx, false);
Jens Axboe320ae512013-10-24 09:20:05 +01001224}
1225EXPORT_SYMBOL(blk_mq_start_hw_queue);
1226
Christoph Hellwig2f268552014-04-16 09:44:56 +02001227void blk_mq_start_hw_queues(struct request_queue *q)
1228{
1229 struct blk_mq_hw_ctx *hctx;
1230 int i;
1231
1232 queue_for_each_hw_ctx(q, hctx, i)
1233 blk_mq_start_hw_queue(hctx);
1234}
1235EXPORT_SYMBOL(blk_mq_start_hw_queues);
1236
Jens Axboeae911c52016-12-08 13:19:30 -07001237void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1238{
1239 if (!blk_mq_hctx_stopped(hctx))
1240 return;
1241
1242 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1243 blk_mq_run_hw_queue(hctx, async);
1244}
1245EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1246
Christoph Hellwig1b4a3252014-04-16 09:44:54 +02001247void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
Jens Axboe320ae512013-10-24 09:20:05 +01001248{
1249 struct blk_mq_hw_ctx *hctx;
1250 int i;
1251
Jens Axboeae911c52016-12-08 13:19:30 -07001252 queue_for_each_hw_ctx(q, hctx, i)
1253 blk_mq_start_stopped_hw_queue(hctx, async);
Jens Axboe320ae512013-10-24 09:20:05 +01001254}
1255EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1256
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001257static void blk_mq_run_work_fn(struct work_struct *work)
Jens Axboe320ae512013-10-24 09:20:05 +01001258{
1259 struct blk_mq_hw_ctx *hctx;
1260
Jens Axboe27489a32016-08-24 15:54:25 -06001261 hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
Jens Axboee4043dc2014-04-09 10:18:23 -06001262
Jens Axboe320ae512013-10-24 09:20:05 +01001263 __blk_mq_run_hw_queue(hctx);
1264}
1265
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001266static void blk_mq_delay_work_fn(struct work_struct *work)
1267{
1268 struct blk_mq_hw_ctx *hctx;
1269
1270 hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
1271
1272 if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
1273 __blk_mq_run_hw_queue(hctx);
1274}
1275
1276void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1277{
Ming Lei19c66e52014-12-03 19:38:04 +08001278 if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
1279 return;
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001280
Jens Axboe7e79dad2017-01-19 07:58:59 -07001281 blk_mq_stop_hw_queue(hctx);
Christoph Hellwigb657d7e2014-11-24 09:27:23 +01001282 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1283 &hctx->delay_work, msecs_to_jiffies(msecs));
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001284}
1285EXPORT_SYMBOL(blk_mq_delay_queue);
1286
Ming Leicfd0c552015-10-20 23:13:57 +08001287static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
Ming Leicfd0c552015-10-20 23:13:57 +08001288 struct request *rq,
1289 bool at_head)
Jens Axboe320ae512013-10-24 09:20:05 +01001290{
Jens Axboee57690f2016-08-24 15:34:35 -06001291 struct blk_mq_ctx *ctx = rq->mq_ctx;
1292
Jens Axboe01b983c2013-11-19 18:59:10 -07001293 trace_block_rq_insert(hctx->queue, rq);
1294
Christoph Hellwig72a0a362014-02-07 10:22:36 -08001295 if (at_head)
1296 list_add(&rq->queuelist, &ctx->rq_list);
1297 else
1298 list_add_tail(&rq->queuelist, &ctx->rq_list);
Ming Leicfd0c552015-10-20 23:13:57 +08001299}
Jens Axboe4bb659b2014-05-09 09:36:49 -06001300
Jens Axboe2c3ad662016-12-14 14:34:47 -07001301void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1302 bool at_head)
Ming Leicfd0c552015-10-20 23:13:57 +08001303{
1304 struct blk_mq_ctx *ctx = rq->mq_ctx;
1305
Jens Axboee57690f2016-08-24 15:34:35 -06001306 __blk_mq_insert_req_list(hctx, rq, at_head);
Jens Axboe320ae512013-10-24 09:20:05 +01001307 blk_mq_hctx_mark_pending(hctx, ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001308}
1309
Jens Axboebd166ef2017-01-17 06:03:22 -07001310void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1311 struct list_head *list)
Jens Axboe320ae512013-10-24 09:20:05 +01001312
1313{
Jens Axboe320ae512013-10-24 09:20:05 +01001314 /*
1315 * preemption doesn't flush plug list, so it's possible ctx->cpu is
1316 * offline now
1317 */
1318 spin_lock(&ctx->lock);
1319 while (!list_empty(list)) {
1320 struct request *rq;
1321
1322 rq = list_first_entry(list, struct request, queuelist);
Jens Axboee57690f2016-08-24 15:34:35 -06001323 BUG_ON(rq->mq_ctx != ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001324 list_del_init(&rq->queuelist);
Jens Axboee57690f2016-08-24 15:34:35 -06001325 __blk_mq_insert_req_list(hctx, rq, false);
Jens Axboe320ae512013-10-24 09:20:05 +01001326 }
Ming Leicfd0c552015-10-20 23:13:57 +08001327 blk_mq_hctx_mark_pending(hctx, ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001328 spin_unlock(&ctx->lock);
Jens Axboe320ae512013-10-24 09:20:05 +01001329}
1330
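/*
 * Order plugged requests by software queue (mq_ctx) first and by start sector
 * within a queue, so that blk_mq_flush_plug_list() below can hand them to the
 * scheduler in per-ctx batches.
 */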
1331static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1332{
1333 struct request *rqa = container_of(a, struct request, queuelist);
1334 struct request *rqb = container_of(b, struct request, queuelist);
1335
1336 return !(rqa->mq_ctx < rqb->mq_ctx ||
1337 (rqa->mq_ctx == rqb->mq_ctx &&
1338 blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1339}
1340
1341void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1342{
1343 struct blk_mq_ctx *this_ctx;
1344 struct request_queue *this_q;
1345 struct request *rq;
1346 LIST_HEAD(list);
1347 LIST_HEAD(ctx_list);
1348 unsigned int depth;
1349
1350 list_splice_init(&plug->mq_list, &list);
1351
1352 list_sort(NULL, &list, plug_ctx_cmp);
1353
1354 this_q = NULL;
1355 this_ctx = NULL;
1356 depth = 0;
1357
1358 while (!list_empty(&list)) {
1359 rq = list_entry_rq(list.next);
1360 list_del_init(&rq->queuelist);
1361 BUG_ON(!rq->q);
1362 if (rq->mq_ctx != this_ctx) {
1363 if (this_ctx) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001364 trace_block_unplug(this_q, depth, from_schedule);
1365 blk_mq_sched_insert_requests(this_q, this_ctx,
1366 &ctx_list,
1367 from_schedule);
Jens Axboe320ae512013-10-24 09:20:05 +01001368 }
1369
1370 this_ctx = rq->mq_ctx;
1371 this_q = rq->q;
1372 depth = 0;
1373 }
1374
1375 depth++;
1376 list_add_tail(&rq->queuelist, &ctx_list);
1377 }
1378
1379 /*
1380 * If 'this_ctx' is set, we know we have entries to complete
1381 * on 'ctx_list'. Do those.
1382 */
1383 if (this_ctx) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001384 trace_block_unplug(this_q, depth, from_schedule);
1385 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1386 from_schedule);
Jens Axboe320ae512013-10-24 09:20:05 +01001387 }
1388}
1389
1390static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1391{
1392 init_request_from_bio(rq, bio);
Jens Axboe4b570522014-05-29 11:00:11 -06001393
Jens Axboe6e85eaf2016-12-02 20:00:14 -07001394 blk_account_io_start(rq, true);
Jens Axboe320ae512013-10-24 09:20:05 +01001395}
1396
Jens Axboe274a5842014-08-15 12:44:08 -06001397static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1398{
1399 return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1400 !blk_queue_nomerges(hctx->queue);
1401}
1402
Jens Axboe07068d52014-05-22 10:40:51 -06001403static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1404 struct blk_mq_ctx *ctx,
1405 struct request *rq, struct bio *bio)
1406{
Ming Leie18378a2015-10-20 23:13:54 +08001407 if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
Jens Axboe07068d52014-05-22 10:40:51 -06001408 blk_mq_bio_to_request(rq, bio);
1409 spin_lock(&ctx->lock);
1410insert_rq:
1411 __blk_mq_insert_request(hctx, rq, false);
1412 spin_unlock(&ctx->lock);
1413 return false;
1414 } else {
Jens Axboe274a5842014-08-15 12:44:08 -06001415 struct request_queue *q = hctx->queue;
1416
Jens Axboe07068d52014-05-22 10:40:51 -06001417 spin_lock(&ctx->lock);
1418 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1419 blk_mq_bio_to_request(rq, bio);
1420 goto insert_rq;
1421 }
1422
1423 spin_unlock(&ctx->lock);
Jens Axboebd166ef2017-01-17 06:03:22 -07001424 __blk_mq_finish_request(hctx, ctx, rq);
Jens Axboe07068d52014-05-22 10:40:51 -06001425 return true;
1426 }
1427}
1428
Jens Axboefd2d3322017-01-12 10:04:45 -07001429static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1430{
Jens Axboebd166ef2017-01-17 06:03:22 -07001431 if (rq->tag != -1)
1432 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1433
1434 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
Jens Axboefd2d3322017-01-12 10:04:45 -07001435}
1436
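/*
 * Try to issue the request straight to the driver, bypassing the software
 * queues. This only happens when no I/O scheduler is attached and a driver
 * tag is available; a BLK_MQ_RQ_QUEUE_ERROR return fails the request, while
 * any other non-OK return (e.g. busy) falls back to the normal scheduler
 * insert path.
 */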
Jens Axboe066a4a72016-11-11 12:24:46 -07001437static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
Shaohua Lif984df12015-05-08 10:51:32 -07001438{
Shaohua Lif984df12015-05-08 10:51:32 -07001439 struct request_queue *q = rq->q;
Shaohua Lif984df12015-05-08 10:51:32 -07001440 struct blk_mq_queue_data bd = {
1441 .rq = rq,
1442 .list = NULL,
1443 .last = 1
1444 };
Jens Axboebd166ef2017-01-17 06:03:22 -07001445 struct blk_mq_hw_ctx *hctx;
1446 blk_qc_t new_cookie;
1447 int ret;
Shaohua Lif984df12015-05-08 10:51:32 -07001448
Jens Axboebd166ef2017-01-17 06:03:22 -07001449 if (q->elevator)
Bart Van Assche2253efc2016-10-28 17:20:02 -07001450 goto insert;
1451
Jens Axboebd166ef2017-01-17 06:03:22 -07001452 if (!blk_mq_get_driver_tag(rq, &hctx, false))
1453 goto insert;
1454
1455 new_cookie = request_to_qc_t(hctx, rq);
1456
Shaohua Lif984df12015-05-08 10:51:32 -07001457 /*
 1458	 * If the driver returns OK, we are done. On a fatal error, kill the
 1459	 * request. Any other return (e.g. busy) means we add the request back
 1460	 * to our list as we previously would have done
1461 */
1462 ret = q->mq_ops->queue_rq(hctx, &bd);
Jens Axboe7b371632015-11-05 10:41:40 -07001463 if (ret == BLK_MQ_RQ_QUEUE_OK) {
1464 *cookie = new_cookie;
Bart Van Assche2253efc2016-10-28 17:20:02 -07001465 return;
Shaohua Lif984df12015-05-08 10:51:32 -07001466 }
Jens Axboe7b371632015-11-05 10:41:40 -07001467
1468 __blk_mq_requeue_request(rq);
1469
1470 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1471 *cookie = BLK_QC_T_NONE;
1472 rq->errors = -EIO;
1473 blk_mq_end_request(rq, rq->errors);
Bart Van Assche2253efc2016-10-28 17:20:02 -07001474 return;
Jens Axboe7b371632015-11-05 10:41:40 -07001475 }
1476
Bart Van Assche2253efc2016-10-28 17:20:02 -07001477insert:
Jens Axboebd6737f2017-01-27 01:00:47 -07001478 blk_mq_sched_insert_request(rq, false, true, true, false);
Shaohua Lif984df12015-05-08 10:51:32 -07001479}
1480
Jens Axboe07068d52014-05-22 10:40:51 -06001481/*
1482 * Multiple hardware queue variant. This will not use per-process plugs,
1483 * but will attempt to bypass the hctx queueing if we can go straight to
1484 * hardware for SYNC IO.
1485 */
Jens Axboedece1632015-11-05 10:41:16 -07001486static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
Jens Axboe07068d52014-05-22 10:40:51 -06001487{
Christoph Hellwigef295ec2016-10-28 08:48:16 -06001488 const int is_sync = op_is_sync(bio->bi_opf);
Christoph Hellwigf73f44e2017-01-27 08:30:47 -07001489 const int is_flush_fua = op_is_flush(bio->bi_opf);
Jens Axboe5a797e02017-01-26 12:22:11 -07001490 struct blk_mq_alloc_data data = { .flags = 0 };
Jens Axboe07068d52014-05-22 10:40:51 -06001491 struct request *rq;
Bart Van Assche6a83e742016-11-02 10:09:51 -06001492 unsigned int request_count = 0, srcu_idx;
Shaohua Lif984df12015-05-08 10:51:32 -07001493 struct blk_plug *plug;
Shaohua Li5b3f3412015-05-08 10:51:33 -07001494 struct request *same_queue_rq = NULL;
Jens Axboe7b371632015-11-05 10:41:40 -07001495 blk_qc_t cookie;
Jens Axboe87760e52016-11-09 12:38:14 -07001496 unsigned int wb_acct;
Jens Axboe07068d52014-05-22 10:40:51 -06001497
1498 blk_queue_bounce(q, &bio);
1499
1500 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001501 bio_io_error(bio);
Jens Axboedece1632015-11-05 10:41:16 -07001502 return BLK_QC_T_NONE;
Jens Axboe07068d52014-05-22 10:40:51 -06001503 }
1504
Kent Overstreet54efd502015-04-23 22:37:18 -07001505 blk_queue_split(q, &bio, q->bio_split);
1506
Omar Sandoval87c279e2016-06-01 22:18:48 -07001507 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1508 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1509 return BLK_QC_T_NONE;
Shaohua Lif984df12015-05-08 10:51:32 -07001510
Jens Axboebd166ef2017-01-17 06:03:22 -07001511 if (blk_mq_sched_bio_merge(q, bio))
1512 return BLK_QC_T_NONE;
1513
Jens Axboe87760e52016-11-09 12:38:14 -07001514 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1515
Jens Axboebd166ef2017-01-17 06:03:22 -07001516 trace_block_getrq(q, bio, bio->bi_opf);
1517
1518 rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
Jens Axboe87760e52016-11-09 12:38:14 -07001519 if (unlikely(!rq)) {
1520 __wbt_done(q->rq_wb, wb_acct);
Jens Axboedece1632015-11-05 10:41:16 -07001521 return BLK_QC_T_NONE;
Jens Axboe87760e52016-11-09 12:38:14 -07001522 }
1523
1524 wbt_track(&rq->issue_stat, wb_acct);
Jens Axboe07068d52014-05-22 10:40:51 -06001525
Jens Axboefd2d3322017-01-12 10:04:45 -07001526 cookie = request_to_qc_t(data.hctx, rq);
Jens Axboe07068d52014-05-22 10:40:51 -06001527
1528 if (unlikely(is_flush_fua)) {
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001529 if (q->elevator)
1530 goto elv_insert;
Jens Axboe07068d52014-05-22 10:40:51 -06001531 blk_mq_bio_to_request(rq, bio);
1532 blk_insert_flush(rq);
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001533 goto run_queue;
Jens Axboe07068d52014-05-22 10:40:51 -06001534 }
1535
Shaohua Lif984df12015-05-08 10:51:32 -07001536 plug = current->plug;
Jens Axboee167dfb2014-10-29 11:18:26 -06001537 /*
 1538	 * If the driver supports deferred issue based on 'last', then
 1539	 * queue it up as normal, since we can potentially save some
1540 * CPU this way.
1541 */
Shaohua Lif984df12015-05-08 10:51:32 -07001542 if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1543 !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1544 struct request *old_rq = NULL;
Jens Axboe07068d52014-05-22 10:40:51 -06001545
1546 blk_mq_bio_to_request(rq, bio);
Jens Axboe07068d52014-05-22 10:40:51 -06001547
1548 /*
Bart Van Assche6a83e742016-11-02 10:09:51 -06001549 * We do limited plugging. If the bio can be merged, do that.
Shaohua Lif984df12015-05-08 10:51:32 -07001550 * Otherwise the existing request in the plug list will be
 1551	 * issued. So the plug list will hold at most one request.
Jens Axboe07068d52014-05-22 10:40:51 -06001552 */
Shaohua Lif984df12015-05-08 10:51:32 -07001553 if (plug) {
Shaohua Li5b3f3412015-05-08 10:51:33 -07001554 /*
1555 * The plug list might get flushed before this. If that
Jens Axboeb094f892015-11-20 20:29:45 -07001556 * happens, same_queue_rq is invalid and plug list is
1557 * empty
1558 */
Shaohua Li5b3f3412015-05-08 10:51:33 -07001559 if (same_queue_rq && !list_empty(&plug->mq_list)) {
1560 old_rq = same_queue_rq;
Shaohua Lif984df12015-05-08 10:51:32 -07001561 list_del_init(&old_rq->queuelist);
Jens Axboe07068d52014-05-22 10:40:51 -06001562 }
Shaohua Lif984df12015-05-08 10:51:32 -07001563 list_add_tail(&rq->queuelist, &plug->mq_list);
1564 } else /* is_sync */
1565 old_rq = rq;
1566 blk_mq_put_ctx(data.ctx);
1567 if (!old_rq)
Jens Axboe7b371632015-11-05 10:41:40 -07001568 goto done;
Bart Van Assche6a83e742016-11-02 10:09:51 -06001569
1570 if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
1571 rcu_read_lock();
Jens Axboe066a4a72016-11-11 12:24:46 -07001572 blk_mq_try_issue_directly(old_rq, &cookie);
Bart Van Assche6a83e742016-11-02 10:09:51 -06001573 rcu_read_unlock();
1574 } else {
1575 srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
Jens Axboe066a4a72016-11-11 12:24:46 -07001576 blk_mq_try_issue_directly(old_rq, &cookie);
Bart Van Assche6a83e742016-11-02 10:09:51 -06001577 srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
1578 }
Jens Axboe7b371632015-11-05 10:41:40 -07001579 goto done;
Jens Axboe07068d52014-05-22 10:40:51 -06001580 }
1581
Jens Axboebd166ef2017-01-17 06:03:22 -07001582 if (q->elevator) {
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001583elv_insert:
Jens Axboebd166ef2017-01-17 06:03:22 -07001584 blk_mq_put_ctx(data.ctx);
1585 blk_mq_bio_to_request(rq, bio);
Jens Axboe0abad772017-01-26 12:28:10 -07001586 blk_mq_sched_insert_request(rq, false, true,
Jens Axboebd6737f2017-01-27 01:00:47 -07001587 !is_sync || is_flush_fua, true);
Jens Axboebd166ef2017-01-17 06:03:22 -07001588 goto done;
1589 }
Jens Axboe07068d52014-05-22 10:40:51 -06001590 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1591 /*
1592 * For a SYNC request, send it to the hardware immediately. For
1593 * an ASYNC request, just ensure that we run it later on. The
1594 * latter allows for merging opportunities and more efficient
1595 * dispatching.
1596 */
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001597run_queue:
Jens Axboe07068d52014-05-22 10:40:51 -06001598 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1599 }
Jens Axboe07068d52014-05-22 10:40:51 -06001600 blk_mq_put_ctx(data.ctx);
Jens Axboe7b371632015-11-05 10:41:40 -07001601done:
1602 return cookie;
Jens Axboe07068d52014-05-22 10:40:51 -06001603}
1604
1605/*
1606 * Single hardware queue variant. This will attempt to use any per-process
1607 * plug for merging and IO deferral.
1608 */
Jens Axboedece1632015-11-05 10:41:16 -07001609static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
Jens Axboe07068d52014-05-22 10:40:51 -06001610{
Christoph Hellwigef295ec2016-10-28 08:48:16 -06001611 const int is_sync = op_is_sync(bio->bi_opf);
Christoph Hellwigf73f44e2017-01-27 08:30:47 -07001612 const int is_flush_fua = op_is_flush(bio->bi_opf);
Jeff Moyere6c44382015-05-08 10:51:30 -07001613 struct blk_plug *plug;
1614 unsigned int request_count = 0;
Jens Axboe5a797e02017-01-26 12:22:11 -07001615 struct blk_mq_alloc_data data = { .flags = 0 };
Jens Axboe07068d52014-05-22 10:40:51 -06001616 struct request *rq;
Jens Axboe7b371632015-11-05 10:41:40 -07001617 blk_qc_t cookie;
Jens Axboe87760e52016-11-09 12:38:14 -07001618 unsigned int wb_acct;
Jens Axboe07068d52014-05-22 10:40:51 -06001619
Jens Axboe07068d52014-05-22 10:40:51 -06001620 blk_queue_bounce(q, &bio);
1621
1622 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001623 bio_io_error(bio);
Jens Axboedece1632015-11-05 10:41:16 -07001624 return BLK_QC_T_NONE;
Jens Axboe07068d52014-05-22 10:40:51 -06001625 }
1626
Kent Overstreet54efd502015-04-23 22:37:18 -07001627 blk_queue_split(q, &bio, q->bio_split);
1628
Omar Sandoval87c279e2016-06-01 22:18:48 -07001629 if (!is_flush_fua && !blk_queue_nomerges(q)) {
1630 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1631 return BLK_QC_T_NONE;
1632 } else
1633 request_count = blk_plug_queued_count(q);
Jens Axboe07068d52014-05-22 10:40:51 -06001634
Jens Axboebd166ef2017-01-17 06:03:22 -07001635 if (blk_mq_sched_bio_merge(q, bio))
1636 return BLK_QC_T_NONE;
1637
Jens Axboe87760e52016-11-09 12:38:14 -07001638 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1639
Jens Axboebd166ef2017-01-17 06:03:22 -07001640 trace_block_getrq(q, bio, bio->bi_opf);
1641
1642 rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
Jens Axboe87760e52016-11-09 12:38:14 -07001643 if (unlikely(!rq)) {
1644 __wbt_done(q->rq_wb, wb_acct);
Jens Axboedece1632015-11-05 10:41:16 -07001645 return BLK_QC_T_NONE;
Jens Axboe87760e52016-11-09 12:38:14 -07001646 }
1647
1648 wbt_track(&rq->issue_stat, wb_acct);
Jens Axboe320ae512013-10-24 09:20:05 +01001649
Jens Axboefd2d3322017-01-12 10:04:45 -07001650 cookie = request_to_qc_t(data.hctx, rq);
Jens Axboe320ae512013-10-24 09:20:05 +01001651
1652 if (unlikely(is_flush_fua)) {
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001653 if (q->elevator)
1654 goto elv_insert;
Jens Axboe320ae512013-10-24 09:20:05 +01001655 blk_mq_bio_to_request(rq, bio);
Jens Axboe320ae512013-10-24 09:20:05 +01001656 blk_insert_flush(rq);
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001657 goto run_queue;
Jens Axboe320ae512013-10-24 09:20:05 +01001658 }
1659
1660 /*
1661 * A task plug currently exists. Since this is completely lockless,
1662 * utilize that to temporarily store requests until the task is
1663 * either done or scheduled away.
1664 */
Jeff Moyere6c44382015-05-08 10:51:30 -07001665 plug = current->plug;
1666 if (plug) {
Shaohua Li600271d2016-11-03 17:03:54 -07001667 struct request *last = NULL;
1668
Jeff Moyere6c44382015-05-08 10:51:30 -07001669 blk_mq_bio_to_request(rq, bio);
Ming Lei0a6219a2016-11-16 18:07:05 +08001670
1671 /*
 1672	 * @request_count may become stale because the task may have been
 1673	 * scheduled out, so check the list again.
1674 */
1675 if (list_empty(&plug->mq_list))
1676 request_count = 0;
Ming Lei676d0602015-10-20 23:13:56 +08001677 if (!request_count)
Jeff Moyere6c44382015-05-08 10:51:30 -07001678 trace_block_plug(q);
Shaohua Li600271d2016-11-03 17:03:54 -07001679 else
1680 last = list_entry_rq(plug->mq_list.prev);
Jens Axboeb094f892015-11-20 20:29:45 -07001681
1682 blk_mq_put_ctx(data.ctx);
1683
Shaohua Li600271d2016-11-03 17:03:54 -07001684 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1685 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
Jeff Moyere6c44382015-05-08 10:51:30 -07001686 blk_flush_plug_list(plug, false);
1687 trace_block_plug(q);
Jens Axboe320ae512013-10-24 09:20:05 +01001688 }
Jens Axboeb094f892015-11-20 20:29:45 -07001689
Jeff Moyere6c44382015-05-08 10:51:30 -07001690 list_add_tail(&rq->queuelist, &plug->mq_list);
Jens Axboe7b371632015-11-05 10:41:40 -07001691 return cookie;
Jens Axboe320ae512013-10-24 09:20:05 +01001692 }
1693
Jens Axboebd166ef2017-01-17 06:03:22 -07001694 if (q->elevator) {
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001695elv_insert:
Jens Axboebd166ef2017-01-17 06:03:22 -07001696 blk_mq_put_ctx(data.ctx);
1697 blk_mq_bio_to_request(rq, bio);
Jens Axboe0abad772017-01-26 12:28:10 -07001698 blk_mq_sched_insert_request(rq, false, true,
Jens Axboebd6737f2017-01-27 01:00:47 -07001699 !is_sync || is_flush_fua, true);
Jens Axboebd166ef2017-01-17 06:03:22 -07001700 goto done;
1701 }
Jens Axboe07068d52014-05-22 10:40:51 -06001702 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1703 /*
1704 * For a SYNC request, send it to the hardware immediately. For
1705 * an ASYNC request, just ensure that we run it later on. The
1706 * latter allows for merging opportunities and more efficient
1707 * dispatching.
1708 */
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001709run_queue:
Jens Axboe07068d52014-05-22 10:40:51 -06001710 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
Jens Axboe320ae512013-10-24 09:20:05 +01001711 }
1712
Jens Axboe07068d52014-05-22 10:40:51 -06001713 blk_mq_put_ctx(data.ctx);
Jens Axboebd166ef2017-01-17 06:03:22 -07001714done:
Jens Axboe7b371632015-11-05 10:41:40 -07001715 return cookie;
Jens Axboe320ae512013-10-24 09:20:05 +01001716}
1717
Jens Axboecc71a6f2017-01-11 14:29:56 -07001718void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1719 unsigned int hctx_idx)
Jens Axboe320ae512013-10-24 09:20:05 +01001720{
1721 struct page *page;
1722
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001723 if (tags->rqs && set->ops->exit_request) {
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001724 int i;
1725
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001726 for (i = 0; i < tags->nr_tags; i++) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001727 struct request *rq = tags->static_rqs[i];
1728
1729 if (!rq)
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001730 continue;
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001731 set->ops->exit_request(set->driver_data, rq,
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001732 hctx_idx, i);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001733 tags->static_rqs[i] = NULL;
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001734 }
1735 }
1736
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001737 while (!list_empty(&tags->page_list)) {
1738 page = list_first_entry(&tags->page_list, struct page, lru);
Dave Hansen67534712014-01-08 20:17:46 -07001739 list_del_init(&page->lru);
Catalin Marinasf75782e2015-09-14 18:16:02 +01001740 /*
1741 * Remove kmemleak object previously allocated in
1742 * blk_mq_init_rq_map().
1743 */
1744 kmemleak_free(page_address(page));
Jens Axboe320ae512013-10-24 09:20:05 +01001745 __free_pages(page, page->private);
1746 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07001747}
Jens Axboe320ae512013-10-24 09:20:05 +01001748
Jens Axboecc71a6f2017-01-11 14:29:56 -07001749void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1750{
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001751 kfree(tags->rqs);
Jens Axboecc71a6f2017-01-11 14:29:56 -07001752 tags->rqs = NULL;
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001753 kfree(tags->static_rqs);
1754 tags->static_rqs = NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01001755
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001756 blk_mq_free_tags(tags);
Jens Axboe320ae512013-10-24 09:20:05 +01001757}
1758
Jens Axboecc71a6f2017-01-11 14:29:56 -07001759struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1760 unsigned int hctx_idx,
1761 unsigned int nr_tags,
1762 unsigned int reserved_tags)
Jens Axboe320ae512013-10-24 09:20:05 +01001763{
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001764 struct blk_mq_tags *tags;
Shaohua Li59f082e2017-02-01 09:53:14 -08001765 int node;
Jens Axboe320ae512013-10-24 09:20:05 +01001766
Shaohua Li59f082e2017-02-01 09:53:14 -08001767 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1768 if (node == NUMA_NO_NODE)
1769 node = set->numa_node;
1770
1771 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
Shaohua Li24391c02015-01-23 14:18:00 -07001772 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001773 if (!tags)
1774 return NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01001775
Jens Axboecc71a6f2017-01-11 14:29:56 -07001776 tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001777 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
Shaohua Li59f082e2017-02-01 09:53:14 -08001778 node);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001779 if (!tags->rqs) {
1780 blk_mq_free_tags(tags);
1781 return NULL;
1782 }
Jens Axboe320ae512013-10-24 09:20:05 +01001783
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001784 tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1785 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
Shaohua Li59f082e2017-02-01 09:53:14 -08001786 node);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001787 if (!tags->static_rqs) {
1788 kfree(tags->rqs);
1789 blk_mq_free_tags(tags);
1790 return NULL;
1791 }
1792
Jens Axboecc71a6f2017-01-11 14:29:56 -07001793 return tags;
1794}
1795
1796static size_t order_to_size(unsigned int order)
1797{
1798 return (size_t)PAGE_SIZE << order;
1799}
1800
1801int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1802 unsigned int hctx_idx, unsigned int depth)
1803{
1804 unsigned int i, j, entries_per_page, max_order = 4;
1805 size_t rq_size, left;
Shaohua Li59f082e2017-02-01 09:53:14 -08001806 int node;
1807
1808 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1809 if (node == NUMA_NO_NODE)
1810 node = set->numa_node;
Jens Axboecc71a6f2017-01-11 14:29:56 -07001811
1812 INIT_LIST_HEAD(&tags->page_list);
1813
Jens Axboe320ae512013-10-24 09:20:05 +01001814 /*
1815 * rq_size is the size of the request plus driver payload, rounded
1816 * to the cacheline size
1817 */
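	/*
	 * For example (illustrative numbers only): with a 64-byte cache line
	 * and sizeof(struct request) + set->cmd_size totalling 400 bytes,
	 * rq_size comes out as 448 bytes.
	 */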
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001818 rq_size = round_up(sizeof(struct request) + set->cmd_size,
Jens Axboe320ae512013-10-24 09:20:05 +01001819 cache_line_size());
Jens Axboecc71a6f2017-01-11 14:29:56 -07001820 left = rq_size * depth;
Jens Axboe320ae512013-10-24 09:20:05 +01001821
Jens Axboecc71a6f2017-01-11 14:29:56 -07001822 for (i = 0; i < depth; ) {
Jens Axboe320ae512013-10-24 09:20:05 +01001823 int this_order = max_order;
1824 struct page *page;
1825 int to_do;
1826 void *p;
1827
Bartlomiej Zolnierkiewiczb3a834b2016-05-16 09:54:47 -06001828 while (this_order && left < order_to_size(this_order - 1))
Jens Axboe320ae512013-10-24 09:20:05 +01001829 this_order--;
1830
1831 do {
Shaohua Li59f082e2017-02-01 09:53:14 -08001832 page = alloc_pages_node(node,
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001833 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
Jens Axboea5164402014-09-10 09:02:03 -06001834 this_order);
Jens Axboe320ae512013-10-24 09:20:05 +01001835 if (page)
1836 break;
1837 if (!this_order--)
1838 break;
1839 if (order_to_size(this_order) < rq_size)
1840 break;
1841 } while (1);
1842
1843 if (!page)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001844 goto fail;
Jens Axboe320ae512013-10-24 09:20:05 +01001845
1846 page->private = this_order;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001847 list_add_tail(&page->lru, &tags->page_list);
Jens Axboe320ae512013-10-24 09:20:05 +01001848
1849 p = page_address(page);
Catalin Marinasf75782e2015-09-14 18:16:02 +01001850 /*
1851 * Allow kmemleak to scan these pages as they contain pointers
 1852	 * to additional allocations such as those made via ops->init_request().
1853 */
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001854 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
Jens Axboe320ae512013-10-24 09:20:05 +01001855 entries_per_page = order_to_size(this_order) / rq_size;
Jens Axboecc71a6f2017-01-11 14:29:56 -07001856 to_do = min(entries_per_page, depth - i);
Jens Axboe320ae512013-10-24 09:20:05 +01001857 left -= to_do * rq_size;
1858 for (j = 0; j < to_do; j++) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001859 struct request *rq = p;
1860
1861 tags->static_rqs[i] = rq;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001862 if (set->ops->init_request) {
1863 if (set->ops->init_request(set->driver_data,
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001864 rq, hctx_idx, i,
Shaohua Li59f082e2017-02-01 09:53:14 -08001865 node)) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001866 tags->static_rqs[i] = NULL;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001867 goto fail;
Jens Axboea5164402014-09-10 09:02:03 -06001868 }
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001869 }
1870
Jens Axboe320ae512013-10-24 09:20:05 +01001871 p += rq_size;
1872 i++;
1873 }
1874 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07001875 return 0;
Jens Axboe320ae512013-10-24 09:20:05 +01001876
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001877fail:
Jens Axboecc71a6f2017-01-11 14:29:56 -07001878 blk_mq_free_rqs(set, tags, hctx_idx);
1879 return -ENOMEM;
Jens Axboe320ae512013-10-24 09:20:05 +01001880}
1881
Jens Axboee57690f2016-08-24 15:34:35 -06001882/*
 1883 * 'cpu' is going away. Splice any existing rq_list entries from this
1884 * software queue to the hw queue dispatch list, and ensure that it
1885 * gets run.
1886 */
Thomas Gleixner9467f852016-09-22 08:05:17 -06001887static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
Jens Axboe484b4062014-05-21 14:01:15 -06001888{
Thomas Gleixner9467f852016-09-22 08:05:17 -06001889 struct blk_mq_hw_ctx *hctx;
Jens Axboe484b4062014-05-21 14:01:15 -06001890 struct blk_mq_ctx *ctx;
1891 LIST_HEAD(tmp);
1892
Thomas Gleixner9467f852016-09-22 08:05:17 -06001893 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
Jens Axboee57690f2016-08-24 15:34:35 -06001894 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
Jens Axboe484b4062014-05-21 14:01:15 -06001895
1896 spin_lock(&ctx->lock);
1897 if (!list_empty(&ctx->rq_list)) {
1898 list_splice_init(&ctx->rq_list, &tmp);
1899 blk_mq_hctx_clear_pending(hctx, ctx);
1900 }
1901 spin_unlock(&ctx->lock);
1902
1903 if (list_empty(&tmp))
Thomas Gleixner9467f852016-09-22 08:05:17 -06001904 return 0;
Jens Axboe484b4062014-05-21 14:01:15 -06001905
Jens Axboee57690f2016-08-24 15:34:35 -06001906 spin_lock(&hctx->lock);
1907 list_splice_tail_init(&tmp, &hctx->dispatch);
1908 spin_unlock(&hctx->lock);
Jens Axboe484b4062014-05-21 14:01:15 -06001909
1910 blk_mq_run_hw_queue(hctx, true);
Thomas Gleixner9467f852016-09-22 08:05:17 -06001911 return 0;
Jens Axboe484b4062014-05-21 14:01:15 -06001912}
1913
Thomas Gleixner9467f852016-09-22 08:05:17 -06001914static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
Jens Axboe484b4062014-05-21 14:01:15 -06001915{
Thomas Gleixner9467f852016-09-22 08:05:17 -06001916 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1917 &hctx->cpuhp_dead);
Jens Axboe484b4062014-05-21 14:01:15 -06001918}
1919
Ming Leic3b4afc2015-06-04 22:25:04 +08001920/* hctx->ctxs will be freed in queue's release handler */
Ming Lei08e98fc2014-09-25 23:23:38 +08001921static void blk_mq_exit_hctx(struct request_queue *q,
1922 struct blk_mq_tag_set *set,
1923 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1924{
Ming Leif70ced02014-09-25 23:23:47 +08001925 unsigned flush_start_tag = set->queue_depth;
1926
Ming Lei08e98fc2014-09-25 23:23:38 +08001927 blk_mq_tag_idle(hctx);
1928
Ming Leif70ced02014-09-25 23:23:47 +08001929 if (set->ops->exit_request)
1930 set->ops->exit_request(set->driver_data,
1931 hctx->fq->flush_rq, hctx_idx,
1932 flush_start_tag + hctx_idx);
1933
Ming Lei08e98fc2014-09-25 23:23:38 +08001934 if (set->ops->exit_hctx)
1935 set->ops->exit_hctx(hctx, hctx_idx);
1936
Bart Van Assche6a83e742016-11-02 10:09:51 -06001937 if (hctx->flags & BLK_MQ_F_BLOCKING)
1938 cleanup_srcu_struct(&hctx->queue_rq_srcu);
1939
Thomas Gleixner9467f852016-09-22 08:05:17 -06001940 blk_mq_remove_cpuhp(hctx);
Ming Leif70ced02014-09-25 23:23:47 +08001941 blk_free_flush_queue(hctx->fq);
Omar Sandoval88459642016-09-17 08:38:44 -06001942 sbitmap_free(&hctx->ctx_map);
Ming Lei08e98fc2014-09-25 23:23:38 +08001943}
1944
Ming Lei624dbe42014-05-27 23:35:13 +08001945static void blk_mq_exit_hw_queues(struct request_queue *q,
1946 struct blk_mq_tag_set *set, int nr_queue)
1947{
1948 struct blk_mq_hw_ctx *hctx;
1949 unsigned int i;
1950
1951 queue_for_each_hw_ctx(q, hctx, i) {
1952 if (i == nr_queue)
1953 break;
Ming Lei08e98fc2014-09-25 23:23:38 +08001954 blk_mq_exit_hctx(q, set, hctx, i);
Ming Lei624dbe42014-05-27 23:35:13 +08001955 }
Ming Lei624dbe42014-05-27 23:35:13 +08001956}
1957
Ming Lei08e98fc2014-09-25 23:23:38 +08001958static int blk_mq_init_hctx(struct request_queue *q,
1959 struct blk_mq_tag_set *set,
1960 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1961{
1962 int node;
Ming Leif70ced02014-09-25 23:23:47 +08001963 unsigned flush_start_tag = set->queue_depth;
Ming Lei08e98fc2014-09-25 23:23:38 +08001964
1965 node = hctx->numa_node;
1966 if (node == NUMA_NO_NODE)
1967 node = hctx->numa_node = set->numa_node;
1968
Jens Axboe27489a32016-08-24 15:54:25 -06001969 INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
Ming Lei08e98fc2014-09-25 23:23:38 +08001970 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1971 spin_lock_init(&hctx->lock);
1972 INIT_LIST_HEAD(&hctx->dispatch);
1973 hctx->queue = q;
1974 hctx->queue_num = hctx_idx;
Jeff Moyer2404e602015-11-03 10:40:06 -05001975 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
Ming Lei08e98fc2014-09-25 23:23:38 +08001976
Thomas Gleixner9467f852016-09-22 08:05:17 -06001977 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
Ming Lei08e98fc2014-09-25 23:23:38 +08001978
1979 hctx->tags = set->tags[hctx_idx];
1980
1981 /*
1982 * Allocate space for all possible cpus to avoid allocation at
1983 * runtime
1984 */
1985 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1986 GFP_KERNEL, node);
1987 if (!hctx->ctxs)
1988 goto unregister_cpu_notifier;
1989
Omar Sandoval88459642016-09-17 08:38:44 -06001990 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1991 node))
Ming Lei08e98fc2014-09-25 23:23:38 +08001992 goto free_ctxs;
1993
1994 hctx->nr_ctx = 0;
1995
1996 if (set->ops->init_hctx &&
1997 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1998 goto free_bitmap;
1999
Ming Leif70ced02014-09-25 23:23:47 +08002000 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2001 if (!hctx->fq)
2002 goto exit_hctx;
2003
2004 if (set->ops->init_request &&
2005 set->ops->init_request(set->driver_data,
2006 hctx->fq->flush_rq, hctx_idx,
2007 flush_start_tag + hctx_idx, node))
2008 goto free_fq;
2009
Bart Van Assche6a83e742016-11-02 10:09:51 -06002010 if (hctx->flags & BLK_MQ_F_BLOCKING)
2011 init_srcu_struct(&hctx->queue_rq_srcu);
2012
Ming Lei08e98fc2014-09-25 23:23:38 +08002013 return 0;
2014
Ming Leif70ced02014-09-25 23:23:47 +08002015 free_fq:
2016 kfree(hctx->fq);
2017 exit_hctx:
2018 if (set->ops->exit_hctx)
2019 set->ops->exit_hctx(hctx, hctx_idx);
Ming Lei08e98fc2014-09-25 23:23:38 +08002020 free_bitmap:
Omar Sandoval88459642016-09-17 08:38:44 -06002021 sbitmap_free(&hctx->ctx_map);
Ming Lei08e98fc2014-09-25 23:23:38 +08002022 free_ctxs:
2023 kfree(hctx->ctxs);
2024 unregister_cpu_notifier:
Thomas Gleixner9467f852016-09-22 08:05:17 -06002025 blk_mq_remove_cpuhp(hctx);
Ming Lei08e98fc2014-09-25 23:23:38 +08002026 return -1;
2027}
2028
Jens Axboe320ae512013-10-24 09:20:05 +01002029static void blk_mq_init_cpu_queues(struct request_queue *q,
2030 unsigned int nr_hw_queues)
2031{
2032 unsigned int i;
2033
2034 for_each_possible_cpu(i) {
2035 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2036 struct blk_mq_hw_ctx *hctx;
2037
Jens Axboe320ae512013-10-24 09:20:05 +01002038 __ctx->cpu = i;
2039 spin_lock_init(&__ctx->lock);
2040 INIT_LIST_HEAD(&__ctx->rq_list);
2041 __ctx->queue = q;
Jens Axboecf43e6b2016-11-07 21:32:37 -07002042 blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
2043 blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
Jens Axboe320ae512013-10-24 09:20:05 +01002044
2045 /* If the cpu isn't online, the cpu is mapped to first hctx */
Jens Axboe320ae512013-10-24 09:20:05 +01002046 if (!cpu_online(i))
2047 continue;
2048
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02002049 hctx = blk_mq_map_queue(q, i);
Jens Axboee4043dc2014-04-09 10:18:23 -06002050
Jens Axboe320ae512013-10-24 09:20:05 +01002051 /*
2052 * Set local node, IFF we have more than one hw queue. If
2053 * not, we remain on the home node of the device
2054 */
2055 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
Raghavendra K Tbffed452015-12-02 16:59:05 +05302056 hctx->numa_node = local_memory_node(cpu_to_node(i));
Jens Axboe320ae512013-10-24 09:20:05 +01002057 }
2058}
2059
Jens Axboecc71a6f2017-01-11 14:29:56 -07002060static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2061{
2062 int ret = 0;
2063
2064 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2065 set->queue_depth, set->reserved_tags);
2066 if (!set->tags[hctx_idx])
2067 return false;
2068
2069 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2070 set->queue_depth);
2071 if (!ret)
2072 return true;
2073
2074 blk_mq_free_rq_map(set->tags[hctx_idx]);
2075 set->tags[hctx_idx] = NULL;
2076 return false;
2077}
2078
2079static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2080 unsigned int hctx_idx)
2081{
Jens Axboebd166ef2017-01-17 06:03:22 -07002082 if (set->tags[hctx_idx]) {
2083 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2084 blk_mq_free_rq_map(set->tags[hctx_idx]);
2085 set->tags[hctx_idx] = NULL;
2086 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07002087}
2088
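/*
 * (Re)build the software queue to hardware queue mapping for all online CPUs:
 * clear the existing per-hctx cpumasks, assign each online CPU's ctx to the
 * hctx chosen by q->mq_map, and disable (and free the requests of) any hctx
 * that ends up with no ctx mapped to it.
 */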
Akinobu Mita57783222015-09-27 02:09:23 +09002089static void blk_mq_map_swqueue(struct request_queue *q,
2090 const struct cpumask *online_mask)
Jens Axboe320ae512013-10-24 09:20:05 +01002091{
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002092 unsigned int i, hctx_idx;
Jens Axboe320ae512013-10-24 09:20:05 +01002093 struct blk_mq_hw_ctx *hctx;
2094 struct blk_mq_ctx *ctx;
Ming Lei2a34c082015-04-21 10:00:20 +08002095 struct blk_mq_tag_set *set = q->tag_set;
Jens Axboe320ae512013-10-24 09:20:05 +01002096
Akinobu Mita60de0742015-09-27 02:09:25 +09002097 /*
 2098	 * Avoid others reading an incomplete hctx->cpumask through sysfs
2099 */
2100 mutex_lock(&q->sysfs_lock);
2101
Jens Axboe320ae512013-10-24 09:20:05 +01002102 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboee4043dc2014-04-09 10:18:23 -06002103 cpumask_clear(hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01002104 hctx->nr_ctx = 0;
2105 }
2106
2107 /*
2108 * Map software to hardware queues
2109 */
Thomas Gleixner897bb0c2016-03-19 11:30:33 +01002110 for_each_possible_cpu(i) {
Jens Axboe320ae512013-10-24 09:20:05 +01002111 /* If the cpu isn't online, the cpu is mapped to first hctx */
Akinobu Mita57783222015-09-27 02:09:23 +09002112 if (!cpumask_test_cpu(i, online_mask))
Jens Axboee4043dc2014-04-09 10:18:23 -06002113 continue;
2114
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002115 hctx_idx = q->mq_map[i];
 2116		/* an unmapped hw queue can be remapped after the CPU topology changes */
Jens Axboecc71a6f2017-01-11 14:29:56 -07002117 if (!set->tags[hctx_idx] &&
2118 !__blk_mq_alloc_rq_map(set, hctx_idx)) {
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002119 /*
 2120			 * If tag initialization fails for some hctx,
2121 * that hctx won't be brought online. In this
2122 * case, remap the current ctx to hctx[0] which
2123 * is guaranteed to always have tags allocated
2124 */
Jens Axboecc71a6f2017-01-11 14:29:56 -07002125 q->mq_map[i] = 0;
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002126 }
2127
Thomas Gleixner897bb0c2016-03-19 11:30:33 +01002128 ctx = per_cpu_ptr(q->queue_ctx, i);
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02002129 hctx = blk_mq_map_queue(q, i);
Keith Busch868f2f02015-12-17 17:08:14 -07002130
Jens Axboee4043dc2014-04-09 10:18:23 -06002131 cpumask_set_cpu(i, hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01002132 ctx->index_hw = hctx->nr_ctx;
2133 hctx->ctxs[hctx->nr_ctx++] = ctx;
2134 }
Jens Axboe506e9312014-05-07 10:26:44 -06002135
Akinobu Mita60de0742015-09-27 02:09:25 +09002136 mutex_unlock(&q->sysfs_lock);
2137
Jens Axboe506e9312014-05-07 10:26:44 -06002138 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboe484b4062014-05-21 14:01:15 -06002139 /*
Jens Axboea68aafa2014-08-15 13:19:15 -06002140 * If no software queues are mapped to this hardware queue,
2141 * disable it and free the request entries.
Jens Axboe484b4062014-05-21 14:01:15 -06002142 */
2143 if (!hctx->nr_ctx) {
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002144 /* Never unmap queue 0. We need it as a
 2145			 * fallback in case a new remap fails
 2146			 * allocation.
2147 */
Jens Axboecc71a6f2017-01-11 14:29:56 -07002148 if (i && set->tags[i])
2149 blk_mq_free_map_and_requests(set, i);
2150
Ming Lei2a34c082015-04-21 10:00:20 +08002151 hctx->tags = NULL;
Jens Axboe484b4062014-05-21 14:01:15 -06002152 continue;
2153 }
2154
Ming Lei2a34c082015-04-21 10:00:20 +08002155 hctx->tags = set->tags[i];
2156 WARN_ON(!hctx->tags);
2157
Jens Axboe484b4062014-05-21 14:01:15 -06002158 /*
Chong Yuan889fa312015-04-15 11:39:29 -06002159 * Set the map size to the number of mapped software queues.
2160 * This is more accurate and more efficient than looping
2161 * over all possibly mapped software queues.
2162 */
Omar Sandoval88459642016-09-17 08:38:44 -06002163 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
Chong Yuan889fa312015-04-15 11:39:29 -06002164
2165 /*
Jens Axboe484b4062014-05-21 14:01:15 -06002166 * Initialize batch roundrobin counts
2167 */
Jens Axboe506e9312014-05-07 10:26:44 -06002168 hctx->next_cpu = cpumask_first(hctx->cpumask);
2169 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2170 }
Jens Axboe320ae512013-10-24 09:20:05 +01002171}
2172
Jeff Moyer2404e602015-11-03 10:40:06 -05002173static void queue_set_hctx_shared(struct request_queue *q, bool shared)
Jens Axboe0d2602c2014-05-13 15:10:52 -06002174{
2175 struct blk_mq_hw_ctx *hctx;
Jens Axboe0d2602c2014-05-13 15:10:52 -06002176 int i;
2177
Jeff Moyer2404e602015-11-03 10:40:06 -05002178 queue_for_each_hw_ctx(q, hctx, i) {
2179 if (shared)
2180 hctx->flags |= BLK_MQ_F_TAG_SHARED;
2181 else
2182 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2183 }
2184}
2185
2186static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
2187{
2188 struct request_queue *q;
Jens Axboe0d2602c2014-05-13 15:10:52 -06002189
2190 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2191 blk_mq_freeze_queue(q);
Jeff Moyer2404e602015-11-03 10:40:06 -05002192 queue_set_hctx_shared(q, shared);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002193 blk_mq_unfreeze_queue(q);
2194 }
2195}
2196
2197static void blk_mq_del_queue_tag_set(struct request_queue *q)
2198{
2199 struct blk_mq_tag_set *set = q->tag_set;
2200
Jens Axboe0d2602c2014-05-13 15:10:52 -06002201 mutex_lock(&set->tag_list_lock);
2202 list_del_init(&q->tag_set_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05002203 if (list_is_singular(&set->tag_list)) {
2204 /* just transitioned to unshared */
2205 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2206 /* update existing queue */
2207 blk_mq_update_tag_set_depth(set, false);
2208 }
Jens Axboe0d2602c2014-05-13 15:10:52 -06002209 mutex_unlock(&set->tag_list_lock);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002210}
2211
2212static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2213 struct request_queue *q)
2214{
2215 q->tag_set = set;
2216
2217 mutex_lock(&set->tag_list_lock);
Jeff Moyer2404e602015-11-03 10:40:06 -05002218
2219 /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2220 if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2221 set->flags |= BLK_MQ_F_TAG_SHARED;
2222 /* update existing queue */
2223 blk_mq_update_tag_set_depth(set, true);
2224 }
2225 if (set->flags & BLK_MQ_F_TAG_SHARED)
2226 queue_set_hctx_shared(q, true);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002227 list_add_tail(&q->tag_set_list, &set->tag_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05002228
Jens Axboe0d2602c2014-05-13 15:10:52 -06002229 mutex_unlock(&set->tag_list_lock);
2230}
2231
Ming Leie09aae72015-01-29 20:17:27 +08002232/*
 2233 * This is the actual release handler for mq, but we do it from the
 2234 * request queue's release handler to avoid use-after-free issues and
 2235 * other headaches: q->mq_kobj shouldn't have been introduced, but we
 2236 * can't group the ctx/hctx kobjects without it.
2237 */
2238void blk_mq_release(struct request_queue *q)
2239{
2240 struct blk_mq_hw_ctx *hctx;
2241 unsigned int i;
2242
Jens Axboebd166ef2017-01-17 06:03:22 -07002243 blk_mq_sched_teardown(q);
2244
Ming Leie09aae72015-01-29 20:17:27 +08002245 /* hctx kobj stays in hctx */
Ming Leic3b4afc2015-06-04 22:25:04 +08002246 queue_for_each_hw_ctx(q, hctx, i) {
2247 if (!hctx)
2248 continue;
Ming Lei6c8b2322017-02-22 18:14:01 +08002249 kobject_put(&hctx->kobj);
Ming Leic3b4afc2015-06-04 22:25:04 +08002250 }
Ming Leie09aae72015-01-29 20:17:27 +08002251
Akinobu Mitaa723bab2015-09-27 02:09:21 +09002252 q->mq_map = NULL;
2253
Ming Leie09aae72015-01-29 20:17:27 +08002254 kfree(q->queue_hw_ctx);
2255
Ming Lei7ea5fe32017-02-22 18:14:00 +08002256 /*
 2257	 * release .mq_kobj and the sw queues' kobjects now because
 2258	 * both share their lifetime with the request queue.
2259 */
2260 blk_mq_sysfs_deinit(q);
2261
Ming Leie09aae72015-01-29 20:17:27 +08002262 free_percpu(q->queue_ctx);
2263}
2264
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002265struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
Jens Axboe320ae512013-10-24 09:20:05 +01002266{
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002267 struct request_queue *uninit_q, *q;
2268
2269 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2270 if (!uninit_q)
2271 return ERR_PTR(-ENOMEM);
2272
2273 q = blk_mq_init_allocated_queue(set, uninit_q);
2274 if (IS_ERR(q))
2275 blk_cleanup_queue(uninit_q);
2276
2277 return q;
2278}
2279EXPORT_SYMBOL(blk_mq_init_queue);
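/*
 * Illustrative sketch (not part of the original file) of the usual driver-side
 * setup sequence; example_mq_ops, example_cmd and the error labels are
 * hypothetical names:
 *
 *	set->ops = &example_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct example_cmd);
 *	set->flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	if (blk_mq_alloc_tag_set(set))
 *		goto out_err;
 *
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q))
 *		goto out_free_tag_set;
 */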
2280
Keith Busch868f2f02015-12-17 17:08:14 -07002281static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2282 struct request_queue *q)
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002283{
Keith Busch868f2f02015-12-17 17:08:14 -07002284 int i, j;
2285 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
Jens Axboe320ae512013-10-24 09:20:05 +01002286
Keith Busch868f2f02015-12-17 17:08:14 -07002287 blk_mq_sysfs_unregister(q);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002288 for (i = 0; i < set->nr_hw_queues; i++) {
Keith Busch868f2f02015-12-17 17:08:14 -07002289 int node;
Jens Axboef14bbe72014-05-27 12:06:53 -06002290
Keith Busch868f2f02015-12-17 17:08:14 -07002291 if (hctxs[i])
2292 continue;
2293
2294 node = blk_mq_hw_queue_to_node(q->mq_map, i);
Christoph Hellwigcdef54d2014-05-28 18:11:06 +02002295 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
2296 GFP_KERNEL, node);
Jens Axboe320ae512013-10-24 09:20:05 +01002297 if (!hctxs[i])
Keith Busch868f2f02015-12-17 17:08:14 -07002298 break;
Jens Axboe320ae512013-10-24 09:20:05 +01002299
Jens Axboea86073e2014-10-13 15:41:54 -06002300 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
Keith Busch868f2f02015-12-17 17:08:14 -07002301 node)) {
2302 kfree(hctxs[i]);
2303 hctxs[i] = NULL;
2304 break;
2305 }
Jens Axboee4043dc2014-04-09 10:18:23 -06002306
Jens Axboe0d2602c2014-05-13 15:10:52 -06002307 atomic_set(&hctxs[i]->nr_active, 0);
Jens Axboef14bbe72014-05-27 12:06:53 -06002308 hctxs[i]->numa_node = node;
Jens Axboe320ae512013-10-24 09:20:05 +01002309 hctxs[i]->queue_num = i;
Keith Busch868f2f02015-12-17 17:08:14 -07002310
2311 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2312 free_cpumask_var(hctxs[i]->cpumask);
2313 kfree(hctxs[i]);
2314 hctxs[i] = NULL;
2315 break;
2316 }
2317 blk_mq_hctx_kobj_init(hctxs[i]);
Jens Axboe320ae512013-10-24 09:20:05 +01002318 }
Keith Busch868f2f02015-12-17 17:08:14 -07002319 for (j = i; j < q->nr_hw_queues; j++) {
2320 struct blk_mq_hw_ctx *hctx = hctxs[j];
2321
2322 if (hctx) {
Jens Axboecc71a6f2017-01-11 14:29:56 -07002323 if (hctx->tags)
2324 blk_mq_free_map_and_requests(set, j);
Keith Busch868f2f02015-12-17 17:08:14 -07002325 blk_mq_exit_hctx(q, set, hctx, j);
Keith Busch868f2f02015-12-17 17:08:14 -07002326 kobject_put(&hctx->kobj);
Keith Busch868f2f02015-12-17 17:08:14 -07002327 hctxs[j] = NULL;
2328
2329 }
2330 }
2331 q->nr_hw_queues = i;
2332 blk_mq_sysfs_register(q);
2333}
2334
2335struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2336 struct request_queue *q)
2337{
Ming Lei66841672016-02-12 15:27:00 +08002338 /* mark the queue as mq asap */
2339 q->mq_ops = set->ops;
2340
Keith Busch868f2f02015-12-17 17:08:14 -07002341 q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2342 if (!q->queue_ctx)
Ming Linc7de5722016-05-25 23:23:27 -07002343 goto err_exit;
Keith Busch868f2f02015-12-17 17:08:14 -07002344
Ming Lei737f98c2017-02-22 18:13:59 +08002345 /* init q->mq_kobj and sw queues' kobjects */
2346 blk_mq_sysfs_init(q);
2347
Keith Busch868f2f02015-12-17 17:08:14 -07002348 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2349 GFP_KERNEL, set->numa_node);
2350 if (!q->queue_hw_ctx)
2351 goto err_percpu;
2352
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002353 q->mq_map = set->mq_map;
Keith Busch868f2f02015-12-17 17:08:14 -07002354
2355 blk_mq_realloc_hw_ctxs(set, q);
2356 if (!q->nr_hw_queues)
2357 goto err_hctxs;
Jens Axboe320ae512013-10-24 09:20:05 +01002358
Christoph Hellwig287922eb2015-10-30 20:57:30 +08002359 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
Ming Leie56f6982015-07-16 19:53:22 +08002360 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
Jens Axboe320ae512013-10-24 09:20:05 +01002361
2362 q->nr_queues = nr_cpu_ids;
Jens Axboe320ae512013-10-24 09:20:05 +01002363
Jens Axboe94eddfb2013-11-19 09:25:07 -07002364 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
Jens Axboe320ae512013-10-24 09:20:05 +01002365
Jens Axboe05f1dd52014-05-29 09:53:32 -06002366 if (!(set->flags & BLK_MQ_F_SG_MERGE))
2367 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2368
Christoph Hellwig1be036e2014-02-07 10:22:39 -08002369 q->sg_reserved_size = INT_MAX;
2370
Mike Snitzer28494502016-09-14 13:28:30 -04002371 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
Christoph Hellwig6fca6a62014-05-28 08:08:02 -06002372 INIT_LIST_HEAD(&q->requeue_list);
2373 spin_lock_init(&q->requeue_lock);
2374
Jens Axboe07068d52014-05-22 10:40:51 -06002375 if (q->nr_hw_queues > 1)
2376 blk_queue_make_request(q, blk_mq_make_request);
2377 else
2378 blk_queue_make_request(q, blk_sq_make_request);
2379
Jens Axboeeba71762014-05-20 15:17:27 -06002380 /*
2381 * Do this after blk_queue_make_request() overrides it...
2382 */
2383 q->nr_requests = set->queue_depth;
2384
Jens Axboe64f1c212016-11-14 13:03:03 -07002385 /*
2386 * Default to classic polling
2387 */
2388 q->poll_nsec = -1;
2389
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002390 if (set->ops->complete)
2391 blk_queue_softirq_done(q, set->ops->complete);
Christoph Hellwig30a91cb2014-02-10 03:24:38 -08002392
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002393 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
Jens Axboe320ae512013-10-24 09:20:05 +01002394
Akinobu Mita57783222015-09-27 02:09:23 +09002395 get_online_cpus();
Jens Axboe320ae512013-10-24 09:20:05 +01002396 mutex_lock(&all_q_mutex);
Akinobu Mita4593fdb2015-09-27 02:09:20 +09002397
Jens Axboe320ae512013-10-24 09:20:05 +01002398 list_add_tail(&q->all_q_node, &all_q_list);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002399 blk_mq_add_queue_tag_set(set, q);
Akinobu Mita57783222015-09-27 02:09:23 +09002400 blk_mq_map_swqueue(q, cpu_online_mask);
Jens Axboe484b4062014-05-21 14:01:15 -06002401
Akinobu Mita4593fdb2015-09-27 02:09:20 +09002402 mutex_unlock(&all_q_mutex);
Akinobu Mita57783222015-09-27 02:09:23 +09002403 put_online_cpus();
Akinobu Mita4593fdb2015-09-27 02:09:20 +09002404
Jens Axboed3484992017-01-13 14:43:58 -07002405 if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2406 int ret;
2407
2408 ret = blk_mq_sched_init(q);
2409 if (ret)
2410 return ERR_PTR(ret);
2411 }
2412
Jens Axboe320ae512013-10-24 09:20:05 +01002413 return q;
Christoph Hellwig18741982014-02-10 09:29:00 -07002414
Jens Axboe320ae512013-10-24 09:20:05 +01002415err_hctxs:
Keith Busch868f2f02015-12-17 17:08:14 -07002416 kfree(q->queue_hw_ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01002417err_percpu:
Keith Busch868f2f02015-12-17 17:08:14 -07002418 free_percpu(q->queue_ctx);
Ming Linc7de5722016-05-25 23:23:27 -07002419err_exit:
2420 q->mq_ops = NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01002421 return ERR_PTR(-ENOMEM);
2422}
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002423EXPORT_SYMBOL(blk_mq_init_allocated_queue);
Jens Axboe320ae512013-10-24 09:20:05 +01002424
2425void blk_mq_free_queue(struct request_queue *q)
2426{
Ming Lei624dbe42014-05-27 23:35:13 +08002427 struct blk_mq_tag_set *set = q->tag_set;
Jens Axboe320ae512013-10-24 09:20:05 +01002428
Akinobu Mita0e626362015-09-27 02:09:22 +09002429 mutex_lock(&all_q_mutex);
2430 list_del_init(&q->all_q_node);
2431 mutex_unlock(&all_q_mutex);
2432
Jens Axboe87760e52016-11-09 12:38:14 -07002433 wbt_exit(q);
2434
Jens Axboe0d2602c2014-05-13 15:10:52 -06002435 blk_mq_del_queue_tag_set(q);
2436
Ming Lei624dbe42014-05-27 23:35:13 +08002437 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
Jens Axboe320ae512013-10-24 09:20:05 +01002438}
Jens Axboe320ae512013-10-24 09:20:05 +01002439
2440/* Basically redo blk_mq_init_queue with queue frozen */
Akinobu Mita57783222015-09-27 02:09:23 +09002441static void blk_mq_queue_reinit(struct request_queue *q,
2442 const struct cpumask *online_mask)
Jens Axboe320ae512013-10-24 09:20:05 +01002443{
Christoph Hellwig4ecd4fe2015-05-07 09:38:13 +02002444 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
Jens Axboe320ae512013-10-24 09:20:05 +01002445
Jens Axboe67aec142014-05-30 08:25:36 -06002446 blk_mq_sysfs_unregister(q);
2447
Jens Axboe320ae512013-10-24 09:20:05 +01002448 /*
 2449	 * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
 2450	 * we should change hctx numa_node according to the new topology (this
 2451	 * involves freeing and re-allocating memory; is it worth doing?)
2452 */
2453
Akinobu Mita57783222015-09-27 02:09:23 +09002454 blk_mq_map_swqueue(q, online_mask);
Jens Axboe320ae512013-10-24 09:20:05 +01002455
Jens Axboe67aec142014-05-30 08:25:36 -06002456 blk_mq_sysfs_register(q);
Jens Axboe320ae512013-10-24 09:20:05 +01002457}
2458
Sebastian Andrzej Siewior65d52912016-09-22 08:05:19 -06002459/*
2460 * New online cpumask which is going to be set in this hotplug event.
 2461 * Declare this cpumask as global, as cpu-hotplug operations are invoked
 2462 * one by one and dynamically allocating it could result in a failure.
2463 */
2464static struct cpumask cpuhp_online_new;
2465
2466static void blk_mq_queue_reinit_work(void)
Jens Axboe320ae512013-10-24 09:20:05 +01002467{
2468 struct request_queue *q;
Jens Axboe320ae512013-10-24 09:20:05 +01002469
2470 mutex_lock(&all_q_mutex);
Tejun Heof3af0202014-11-04 13:52:27 -05002471 /*
2472 * We need to freeze and reinit all existing queues. Freezing
 2473	 * involves a synchronous wait for an RCU grace period and doing it
2474 * one by one may take a long time. Start freezing all queues in
2475 * one swoop and then wait for the completions so that freezing can
2476 * take place in parallel.
2477 */
2478 list_for_each_entry(q, &all_q_list, all_q_node)
2479 blk_mq_freeze_queue_start(q);
Gabriel Krisman Bertazi415d3da2016-11-28 15:01:48 -02002480 list_for_each_entry(q, &all_q_list, all_q_node)
Tejun Heof3af0202014-11-04 13:52:27 -05002481 blk_mq_freeze_queue_wait(q);
2482
Jens Axboe320ae512013-10-24 09:20:05 +01002483 list_for_each_entry(q, &all_q_list, all_q_node)
Sebastian Andrzej Siewior65d52912016-09-22 08:05:19 -06002484 blk_mq_queue_reinit(q, &cpuhp_online_new);
Tejun Heof3af0202014-11-04 13:52:27 -05002485
2486 list_for_each_entry(q, &all_q_list, all_q_node)
2487 blk_mq_unfreeze_queue(q);
2488
Jens Axboe320ae512013-10-24 09:20:05 +01002489 mutex_unlock(&all_q_mutex);
Sebastian Andrzej Siewior65d52912016-09-22 08:05:19 -06002490}
2491
2492static int blk_mq_queue_reinit_dead(unsigned int cpu)
2493{
Sebastian Andrzej Siewior97a32862016-09-23 15:02:38 +02002494 cpumask_copy(&cpuhp_online_new, cpu_online_mask);
Sebastian Andrzej Siewior65d52912016-09-22 08:05:19 -06002495 blk_mq_queue_reinit_work();
2496 return 0;
2497}
2498
2499/*
 2500 * Before a hot-added CPU starts handling requests, new mappings must be
 2501 * established. Otherwise, requests in the hw queue might never be
2502 * dispatched.
2503 *
2504 * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
2505 * for CPU0, and ctx1 for CPU1).
2506 *
 2507 * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list,
 2508 * setting bit0 in the pending bitmap, as ctx1->index_hw is still zero.
2509 *
Jens Axboe2c3ad662016-12-14 14:34:47 -07002510 * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set
 2511 * in the pending bitmap and tries to retrieve requests from hctx->ctxs[0]->rq_list.
 2512 * But hctx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
2513 * ignored.
Sebastian Andrzej Siewior65d52912016-09-22 08:05:19 -06002514 */
2515static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2516{
2517 cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2518 cpumask_set_cpu(cpu, &cpuhp_online_new);
2519 blk_mq_queue_reinit_work();
2520 return 0;
Jens Axboe320ae512013-10-24 09:20:05 +01002521}
2522
Jens Axboea5164402014-09-10 09:02:03 -06002523static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2524{
2525 int i;
2526
Jens Axboecc71a6f2017-01-11 14:29:56 -07002527 for (i = 0; i < set->nr_hw_queues; i++)
2528 if (!__blk_mq_alloc_rq_map(set, i))
Jens Axboea5164402014-09-10 09:02:03 -06002529 goto out_unwind;
Jens Axboea5164402014-09-10 09:02:03 -06002530
2531 return 0;
2532
2533out_unwind:
2534 while (--i >= 0)
Jens Axboecc71a6f2017-01-11 14:29:56 -07002535 blk_mq_free_rq_map(set->tags[i]);
Jens Axboea5164402014-09-10 09:02:03 -06002536
Jens Axboea5164402014-09-10 09:02:03 -06002537 return -ENOMEM;
2538}
2539
2540/*
2541 * Allocate the request maps associated with this tag_set. Note that this
2542 * may reduce the depth asked for, if memory is tight. set->queue_depth
2543 * will be updated to reflect the allocated depth.
2544 */
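/*
 * For example, a set asking for a depth of 1024 under memory pressure may end
 * up with 512 or 256; the halving stops with -ENOMEM once the depth would
 * fall below set->reserved_tags + BLK_MQ_TAG_MIN.
 */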
2545static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2546{
2547 unsigned int depth;
2548 int err;
2549
2550 depth = set->queue_depth;
2551 do {
2552 err = __blk_mq_alloc_rq_maps(set);
2553 if (!err)
2554 break;
2555
2556 set->queue_depth >>= 1;
2557 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2558 err = -ENOMEM;
2559 break;
2560 }
2561 } while (set->queue_depth);
2562
2563 if (!set->queue_depth || err) {
2564 pr_err("blk-mq: failed to allocate request map\n");
2565 return -ENOMEM;
2566 }
2567
2568 if (depth != set->queue_depth)
2569 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2570 depth, set->queue_depth);
2571
2572 return 0;
2573}
2574
Jens Axboea4391c62014-06-05 15:21:56 -06002575/*
2576 * Alloc a tag set to be associated with one or more request queues.
2577 * May fail with EINVAL for various error conditions. May adjust the
 2578 * requested depth down, if it is too large. In that case, the set
2579 * value will be stored in set->queue_depth.
2580 */
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002581int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2582{
Christoph Hellwigda695ba2016-09-14 16:18:55 +02002583 int ret;
2584
Bart Van Assche205fb5f2014-10-30 14:45:11 +01002585 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2586
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002587 if (!set->nr_hw_queues)
2588 return -EINVAL;
Jens Axboea4391c62014-06-05 15:21:56 -06002589 if (!set->queue_depth)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002590 return -EINVAL;
2591 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2592 return -EINVAL;
2593
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02002594 if (!set->ops->queue_rq)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002595 return -EINVAL;
2596
Jens Axboea4391c62014-06-05 15:21:56 -06002597 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2598 pr_info("blk-mq: reduced tag depth to %u\n",
2599 BLK_MQ_MAX_DEPTH);
2600 set->queue_depth = BLK_MQ_MAX_DEPTH;
2601 }
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002602
Shaohua Li6637fad2014-11-30 16:00:58 -08002603 /*
2604 * If a crashdump is active, then we are potentially in a very
2605 * memory constrained environment. Limit us to 1 queue and
2606 * 64 tags to prevent using too much memory.
2607 */
2608 if (is_kdump_kernel()) {
2609 set->nr_hw_queues = 1;
2610 set->queue_depth = min(64U, set->queue_depth);
2611 }
Keith Busch868f2f02015-12-17 17:08:14 -07002612 /*
2613 * There is no use for more h/w queues than cpus.
2614 */
2615 if (set->nr_hw_queues > nr_cpu_ids)
2616 set->nr_hw_queues = nr_cpu_ids;
Shaohua Li6637fad2014-11-30 16:00:58 -08002617
Keith Busch868f2f02015-12-17 17:08:14 -07002618 set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002619 GFP_KERNEL, set->numa_node);
2620 if (!set->tags)
Jens Axboea5164402014-09-10 09:02:03 -06002621 return -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002622
Christoph Hellwigda695ba2016-09-14 16:18:55 +02002623 ret = -ENOMEM;
2624 set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2625 GFP_KERNEL, set->numa_node);
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002626 if (!set->mq_map)
2627 goto out_free_tags;
2628
Christoph Hellwigda695ba2016-09-14 16:18:55 +02002629 if (set->ops->map_queues)
2630 ret = set->ops->map_queues(set);
2631 else
2632 ret = blk_mq_map_queues(set);
2633 if (ret)
2634 goto out_free_mq_map;
2635
2636 ret = blk_mq_alloc_rq_maps(set);
2637 if (ret)
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002638 goto out_free_mq_map;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002639
Jens Axboe0d2602c2014-05-13 15:10:52 -06002640 mutex_init(&set->tag_list_lock);
2641 INIT_LIST_HEAD(&set->tag_list);
2642
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002643 return 0;
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002644
2645out_free_mq_map:
2646 kfree(set->mq_map);
2647 set->mq_map = NULL;
2648out_free_tags:
Robert Elliott5676e7b2014-09-02 11:38:44 -05002649 kfree(set->tags);
2650 set->tags = NULL;
Christoph Hellwigda695ba2016-09-14 16:18:55 +02002651 return ret;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002652}
2653EXPORT_SYMBOL(blk_mq_alloc_tag_set);
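
/*
 * Illustrative sketch only, not part of blk-mq: roughly how a driver sets
 * up a tag_set and hands it to blk_mq_alloc_tag_set(). All example_* names
 * are hypothetical; a real driver would also fill in cmd_size for its
 * per-request payload and pick a meaningful numa_node. Once this succeeds,
 * a queue is typically created with blk_mq_init_queue(set) and torn down
 * with blk_cleanup_queue() followed by blk_mq_free_tag_set().
 */
static int example_queue_rq(struct blk_mq_hw_ctx *hctx,
                            const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);
        /* A real driver would hand the request to hardware here. */
        blk_mq_end_request(rq, 0);
        return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops example_mq_ops = {
        .queue_rq       = example_queue_rq,
};

static int example_setup_tag_set(struct blk_mq_tag_set *set)
{
        memset(set, 0, sizeof(*set));
        set->ops          = &example_mq_ops;
        set->nr_hw_queues = 1;
        set->queue_depth  = 64;
        set->numa_node    = NUMA_NO_NODE;
        set->flags        = BLK_MQ_F_SHOULD_MERGE;

        /* May shrink set->queue_depth if memory is tight (see above). */
        return blk_mq_alloc_tag_set(set);
}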
2654
2655void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2656{
2657 int i;
2658
Jens Axboecc71a6f2017-01-11 14:29:56 -07002659 for (i = 0; i < nr_cpu_ids; i++)
2660 blk_mq_free_map_and_requests(set, i);
Jens Axboe484b4062014-05-21 14:01:15 -06002661
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002662 kfree(set->mq_map);
2663 set->mq_map = NULL;
2664
Ming Lei981bd182014-04-24 00:07:34 +08002665 kfree(set->tags);
Robert Elliott5676e7b2014-09-02 11:38:44 -05002666 set->tags = NULL;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002667}
2668EXPORT_SYMBOL(blk_mq_free_tag_set);
2669
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002670int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2671{
2672 struct blk_mq_tag_set *set = q->tag_set;
2673 struct blk_mq_hw_ctx *hctx;
2674 int i, ret;
2675
Jens Axboebd166ef2017-01-17 06:03:22 -07002676 if (!set)
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002677 return -EINVAL;
2678
Jens Axboe70f36b62017-01-19 10:59:07 -07002679 blk_mq_freeze_queue(q);
2680 blk_mq_quiesce_queue(q);
2681
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002682 ret = 0;
2683 queue_for_each_hw_ctx(q, hctx, i) {
Keith Busche9137d42016-02-18 14:56:35 -07002684 if (!hctx->tags)
2685 continue;
Jens Axboebd166ef2017-01-17 06:03:22 -07002686 /*
2687 * If we're using an MQ scheduler, just update the scheduler
2688 * queue depth. This is similar to what the old code would do.
2689 */
Jens Axboe70f36b62017-01-19 10:59:07 -07002690 if (!hctx->sched_tags) {
2691 ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
2692 min(nr, set->queue_depth),
2693 false);
2694 } else {
2695 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2696 nr, true);
2697 }
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002698 if (ret)
2699 break;
2700 }
2701
2702 if (!ret)
2703 q->nr_requests = nr;
2704
Jens Axboe70f36b62017-01-19 10:59:07 -07002705 blk_mq_unfreeze_queue(q);
2706 blk_mq_start_stopped_hw_queues(q, true);
2707
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002708 return ret;
2709}
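
/*
 * Illustrative sketch (hypothetical helper, not part of blk-mq): roughly
 * how the sysfs 'nr_requests' store path drives the function above,
 * clamping to the minimum request count before resizing the tag depth.
 */
static int example_set_nr_requests(struct request_queue *q, unsigned int nr)
{
        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        return blk_mq_update_nr_requests(q, nr);
}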
2710
Keith Busch868f2f02015-12-17 17:08:14 -07002711void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2712{
2713 struct request_queue *q;
2714
2715 if (nr_hw_queues > nr_cpu_ids)
2716 nr_hw_queues = nr_cpu_ids;
2717 if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2718 return;
2719
2720 list_for_each_entry(q, &set->tag_list, tag_set_list)
2721 blk_mq_freeze_queue(q);
2722
2723 set->nr_hw_queues = nr_hw_queues;
2724 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2725 blk_mq_realloc_hw_ctxs(set, q);
2726
Josef Bacikf6f94302017-02-10 13:03:33 -05002727 /*
2728 * Manually set the make_request_fn as blk_queue_make_request
2729 * resets a lot of the queue settings.
2730 */
Keith Busch868f2f02015-12-17 17:08:14 -07002731 if (q->nr_hw_queues > 1)
Josef Bacikf6f94302017-02-10 13:03:33 -05002732 q->make_request_fn = blk_mq_make_request;
Keith Busch868f2f02015-12-17 17:08:14 -07002733 else
Josef Bacikf6f94302017-02-10 13:03:33 -05002734 q->make_request_fn = blk_sq_make_request;
Keith Busch868f2f02015-12-17 17:08:14 -07002735
2736 blk_mq_queue_reinit(q, cpu_online_mask);
2737 }
2738
2739 list_for_each_entry(q, &set->tag_list, tag_set_list)
2740 blk_mq_unfreeze_queue(q);
2741}
2742EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
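
/*
 * Illustrative sketch (hypothetical driver code): a typical caller resizes
 * the shared tag_set after learning how many hardware queues the device
 * can actually service, e.g. after a reset granted fewer MSI-X vectors.
 */
static void example_adjust_hw_queues(struct blk_mq_tag_set *set,
                                     unsigned int granted_queues)
{
        /*
         * Clamped to nr_cpu_ids internally; every queue sharing this
         * tag_set is frozen, its hardware contexts reallocated and
         * remapped, and then unfrozen again.
         */
        blk_mq_update_nr_hw_queues(set, granted_queues);
}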
2743
Jens Axboe64f1c212016-11-14 13:03:03 -07002744static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2745 struct blk_mq_hw_ctx *hctx,
2746 struct request *rq)
2747{
2748 struct blk_rq_stat stat[2];
2749 unsigned long ret = 0;
2750
2751 /*
2752 * If stats collection isn't on, don't sleep but turn it on for
2753 * future users
2754 */
2755 if (!blk_stat_enable(q))
2756 return 0;
2757
2758 /*
2759 * We shouldn't have to do this once per IO; optimize this to just
2760 * reuse the current window of stats until it changes.
2761 */
2762 memset(&stat, 0, sizeof(stat));
2763 blk_hctx_stat_get(hctx, stat);
2764
2765 /*
2766 * As an optimistic guess, use half of the mean service time
2767 * for this type of request. We can (and should) make this smarter.
2768 * For instance, if the completion latencies are tight, we can
2769 * get closer than just half the mean. This is especially
2770 * important on devices where the completion latencies are longer
2771 * than ~10 usec.
2772 */
2773 if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
2774 ret = (stat[BLK_STAT_READ].mean + 1) / 2;
2775 else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
2776 ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
2777
2778 return ret;
2779}
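
/*
 * Illustrative sketch (hypothetical helper): the half-of-mean guess above
 * as a standalone calculation. With a mean read completion time of
 * 80000ns, the hybrid poll path would aim to sleep for
 * (80000 + 1) / 2 = 40000ns before switching to busy polling.
 */
static inline unsigned long example_half_mean_nsecs(u64 mean_nsecs)
{
        return (mean_nsecs + 1) / 2;
}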
2780
Jens Axboe06426ad2016-11-14 13:01:59 -07002781static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
Jens Axboe64f1c212016-11-14 13:03:03 -07002782 struct blk_mq_hw_ctx *hctx,
Jens Axboe06426ad2016-11-14 13:01:59 -07002783 struct request *rq)
2784{
2785 struct hrtimer_sleeper hs;
2786 enum hrtimer_mode mode;
Jens Axboe64f1c212016-11-14 13:03:03 -07002787 unsigned int nsecs;
Jens Axboe06426ad2016-11-14 13:01:59 -07002788 ktime_t kt;
2789
Jens Axboe64f1c212016-11-14 13:03:03 -07002790 if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2791 return false;
2792
2793 /*
2794 * poll_nsec can be:
2795 *
2796 * -1: don't ever hybrid sleep
2797 * 0: use half of prev avg
2798 * >0: use this specific value
2799 */
2800 if (q->poll_nsec == -1)
2801 return false;
2802 else if (q->poll_nsec > 0)
2803 nsecs = q->poll_nsec;
2804 else
2805 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2806
2807 if (!nsecs)
Jens Axboe06426ad2016-11-14 13:01:59 -07002808 return false;
2809
2810 set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2811
2812 /*
2813 * This will be replaced with the stats tracking code, using
2814 * 'avg_completion_time / 2' as the pre-sleep target.
2815 */
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01002816 kt = nsecs;
Jens Axboe06426ad2016-11-14 13:01:59 -07002817
2818 mode = HRTIMER_MODE_REL;
2819 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2820 hrtimer_set_expires(&hs.timer, kt);
2821
2822 hrtimer_init_sleeper(&hs, current);
2823 do {
2824 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2825 break;
2826 set_current_state(TASK_UNINTERRUPTIBLE);
2827 hrtimer_start_expires(&hs.timer, mode);
2828 if (hs.task)
2829 io_schedule();
2830 hrtimer_cancel(&hs.timer);
2831 mode = HRTIMER_MODE_ABS;
2832 } while (hs.task && !signal_pending(current));
2833
2834 __set_current_state(TASK_RUNNING);
2835 destroy_hrtimer_on_stack(&hs.timer);
2836 return true;
2837}
2838
Jens Axboebbd7bb72016-11-04 09:34:34 -06002839static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2840{
2841 struct request_queue *q = hctx->queue;
2842 long state;
2843
Jens Axboe06426ad2016-11-14 13:01:59 -07002844 /*
2845 * If we sleep, have the caller restart the poll loop to reset
2846 * the state. Like for the other success return cases, the
2847 * caller is responsible for checking if the IO completed. If
2848 * the IO isn't complete, we'll get called again and will go
2849 * straight to the busy poll loop.
2850 */
Jens Axboe64f1c212016-11-14 13:03:03 -07002851 if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
Jens Axboe06426ad2016-11-14 13:01:59 -07002852 return true;
2853
Jens Axboebbd7bb72016-11-04 09:34:34 -06002854 hctx->poll_considered++;
2855
2856 state = current->state;
2857 while (!need_resched()) {
2858 int ret;
2859
2860 hctx->poll_invoked++;
2861
2862 ret = q->mq_ops->poll(hctx, rq->tag);
2863 if (ret > 0) {
2864 hctx->poll_success++;
2865 set_current_state(TASK_RUNNING);
2866 return true;
2867 }
2868
2869 if (signal_pending_state(state, current))
2870 set_current_state(TASK_RUNNING);
2871
2872 if (current->state == TASK_RUNNING)
2873 return true;
2874 if (ret < 0)
2875 break;
2876 cpu_relax();
2877 }
2878
2879 return false;
2880}
2881
2882bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2883{
2884 struct blk_mq_hw_ctx *hctx;
2885 struct blk_plug *plug;
2886 struct request *rq;
2887
2888 if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
2889 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2890 return false;
2891
2892 plug = current->plug;
2893 if (plug)
2894 blk_flush_plug_list(plug, false);
2895
2896 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
Jens Axboebd166ef2017-01-17 06:03:22 -07002897 if (!blk_qc_t_is_internal(cookie))
2898 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2899 else
2900 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
Jens Axboebbd7bb72016-11-04 09:34:34 -06002901
2902 return __blk_mq_poll(hctx, rq);
2903}
2904EXPORT_SYMBOL_GPL(blk_mq_poll);
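
/*
 * Illustrative sketch (hypothetical, simplified from what a polled
 * O_DIRECT style caller does): submit_bio() returns the blk_qc_t cookie,
 * the bio's end_io handler sets 'done', and this loop keeps calling
 * blk_mq_poll() until the completion arrives, sleeping only when polling
 * is not possible.
 */
static void example_wait_polled(struct request_queue *q, blk_qc_t cookie,
                                bool *done)
{
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (READ_ONCE(*done))
                        break;

                if (!blk_mq_poll(q, cookie))
                        io_schedule();
        }
        __set_current_state(TASK_RUNNING);
}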
2905
Jens Axboe676141e2014-03-20 13:29:18 -06002906void blk_mq_disable_hotplug(void)
2907{
2908 mutex_lock(&all_q_mutex);
2909}
2910
2911void blk_mq_enable_hotplug(void)
2912{
2913 mutex_unlock(&all_q_mutex);
2914}
2915
Jens Axboe320ae512013-10-24 09:20:05 +01002916static int __init blk_mq_init(void)
2917{
Thomas Gleixner9467f852016-09-22 08:05:17 -06002918 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2919 blk_mq_hctx_notify_dead);
Jens Axboe320ae512013-10-24 09:20:05 +01002920
Sebastian Andrzej Siewior65d52912016-09-22 08:05:19 -06002921 cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
2922 blk_mq_queue_reinit_prepare,
2923 blk_mq_queue_reinit_dead);
Jens Axboe320ae512013-10-24 09:20:05 +01002924 return 0;
2925}
2926subsys_initcall(blk_mq_init);