/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

/*
 * Check if any of the ctxs have pending work in this hardware queue
 */
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return sbitmap_any_bit_set(&hctx->ctx_map) ||
			!list_empty_careful(&hctx->dispatch) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

void blk_mq_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
				  percpu_ref_is_zero(&q->q_usage_counter),
				  timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero. For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_mq_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
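
/*
 * Usage sketch (illustrative, not taken from this file): mq_freeze_depth
 * is a counter, so freeze/unfreeze pairs may nest. A hypothetical driver
 * that must drain all requests before reconfiguring could do:
 *
 *	blk_mq_freeze_queue(q);		// kills q_usage_counter, waits for zero
 *	my_reconfigure(dev);		// hypothetical helper; no requests in flight
 *	blk_mq_unfreeze_queue(q);	// reinits the ref and wakes waiters
 */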

/**
 * blk_mq_quiesce_queue() - wait until all ongoing queue_rq calls have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Additionally, new queue_rq() calls are
 * not prevented unless the queue has been stopped first.
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_stop_hw_queues(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(&hctx->queue_rq_srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
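
/*
 * Usage sketch (illustrative, not taken from this file): quiescing only
 * waits out queue_rq() calls already in progress; the stop issued above
 * is what keeps new dispatches from starting. A hypothetical caller:
 *
 *	blk_mq_quiesce_queue(q);	// stops queues, waits for queue_rq
 *	my_switch_io_path(dev);		// hypothetical helper
 *	blk_mq_start_stopped_hw_queues(q, true);
 */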

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);

	/*
	 * If we are called because the queue has now been marked as
	 * dying, we need to ensure that processes currently waiting on
	 * the queue are notified as well.
	 */
	wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			struct request *rq, unsigned int op)
{
	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = q;
	rq->mq_ctx = ctx;
	rq->cmd_flags = op;
	if (blk_queue_io_stat(q))
		rq->rq_flags |= RQF_IO_STAT;
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->errors = 0;
	rq->extra_len = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	ctx->rq_dispatched[op_is_sync(op)]++;
}
EXPORT_SYMBOL_GPL(blk_mq_rq_ctx_init);

struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
				       unsigned int op)
{
	struct request *rq;
	unsigned int tag;

	tag = blk_mq_get_tag(data);
	if (tag != BLK_MQ_TAG_FAIL) {
		struct blk_mq_tags *tags = blk_mq_tags_from_data(data);

		rq = tags->static_rqs[tag];

		if (data->flags & BLK_MQ_REQ_INTERNAL) {
			rq->tag = -1;
			rq->internal_tag = tag;
		} else {
			if (blk_mq_tag_busy(data->hctx)) {
				rq->rq_flags = RQF_MQ_INFLIGHT;
				atomic_inc(&data->hctx->nr_active);
			}
			rq->tag = tag;
			rq->internal_tag = -1;
			data->hctx->tags->rqs[rq->tag] = rq;
		}

		blk_mq_rq_ctx_init(data->q, data->ctx, rq, op);
		return rq;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__blk_mq_alloc_request);

struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		unsigned int flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

	blk_mq_put_ctx(alloc_data.ctx);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
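
/*
 * Usage sketch (illustrative, not taken from this file): a hypothetical
 * driver allocating a request without sleeping and executing it
 * synchronously:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_READ, BLK_MQ_REQ_NOWAIT);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);	// -EWOULDBLOCK if no tag was free
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_mq_free_request(rq);
 */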

struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
		unsigned int flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	unsigned int cpu;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context. No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, true);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
		blk_queue_exit(q);
		return ERR_PTR(-EXDEV);
	}
	cpu = cpumask_first(alloc_data.hctx->cpumask);
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);

	blk_mq_put_ctx(alloc_data.ctx);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			     struct request *rq)
{
	const int sched_tag = rq->internal_tag;
	struct request_queue *q = rq->q;

	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	wbt_done(q->rq_wb, &rq->issue_stat);
	rq->rq_flags = 0;

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
	if (rq->tag != -1)
		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_sched_completed_request(hctx, rq);
	blk_mq_sched_restart_queues(hctx);
	blk_queue_exit(q);
}

static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	ctx->rq_completed[rq_is_sync(rq)]++;
	__blk_mq_finish_request(hctx, ctx, rq);
}

void blk_mq_finish_request(struct request *rq)
{
	blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}

void blk_mq_free_request(struct request *rq)
{
	blk_mq_sched_put_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, int error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		wbt_done(rq->q->rq_wb, &rq->issue_stat);
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, int error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void blk_mq_ipi_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

static void blk_mq_stat_add(struct request *rq)
{
	if (rq->rq_flags & RQF_STATS) {
		/*
		 * We could use rq->mq_ctx here, but there's less of a risk
		 * of races if we have the completion event add the stats
		 * to the local software queue.
		 */
		struct blk_mq_ctx *ctx;

		ctx = __blk_mq_get_ctx(rq->q, raw_smp_processor_id());
		blk_stat_add(&ctx->stat[rq_data_dir(rq)], rq);
	}
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_stat_add(rq);

	if (!q->softirq_done_fn)
		blk_mq_end_request(rq, rq->errors);
	else
		blk_mq_ipi_complete_request(rq);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq: the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq)) {
		rq->errors = error;
		__blk_mq_complete_request(rq);
	}
}
EXPORT_SYMBOL(blk_mq_complete_request);
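
/*
 * Usage sketch (illustrative, not taken from this file): a hypothetical
 * driver completing a request from its completion interrupt, mapping the
 * hardware tag back to the request first:
 *
 *	struct request *rq = blk_mq_tag_to_rq(hctx->tags, hw_tag);	// hw_tag is hypothetical
 *
 *	blk_mq_complete_request(rq, status ? -EIO : 0);
 *
 * The blk_mark_rq_complete() test above ensures that only one of the
 * normal completion and a racing timeout actually completes the request.
 */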

int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		blk_stat_set_issue_time(&rq->issue_stat);
		rq->rq_flags |= RQF_STATS;
		wbt_issue(q->rq_wb, &rq->issue_stat);
	}

	blk_add_timer(rq);

	/*
	 * Ensure that ->deadline is visible before we set the started
	 * flag and clear the completed flag.
	 */
	smp_mb__before_atomic();

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears. We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, &rq->issue_stat);
	blk_mq_sched_requeue_request(rq);

	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & RQF_SOFTBARRIER))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, true, false, false, true);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false, true);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_schedule_delayed_work(&q->requeue_work,
				      msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
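
/*
 * Usage sketch (illustrative, not taken from this file): a hypothetical
 * driver hitting a transient resource shortage can bounce a started
 * request through the requeue list and retry it later:
 *
 *	blk_mq_requeue_request(rq, false);		// unstart rq and queue it
 *	blk_mq_delay_kick_requeue_list(rq->q, 100);	// re-insert in ~100ms
 *
 * Passing kick_requeue_list == false and kicking once at the end lets
 * several requeued requests be batched into a single worker run.
 */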

void blk_mq_abort_requeue_list(struct request_queue *q)
{
	unsigned long flags;
	LIST_HEAD(rq_list);

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	while (!list_empty(&rq_list)) {
		struct request *rq;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
	}
}
EXPORT_SYMBOL(blk_mq_abort_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	const struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}
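
/*
 * Illustrative sketch (not taken from this file) of a hypothetical driver
 * ->timeout() hook feeding the switch above:
 *
 *	static enum blk_eh_timer_return my_timeout(struct request *rq,
 *						   bool reserved)
 *	{
 *		if (my_hw_still_busy(rq))		// hypothetical check
 *			return BLK_EH_RESET_TIMER;	// re-arm the timer
 *		my_abort_cmd(rq);			// hypothetical abort
 *		return BLK_EH_HANDLED;			// core completes the request
 *	}
 */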

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;

	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		/*
		 * If a request wasn't started before the queue was
		 * marked dying, kill it here or it'll go unnoticed.
		 */
		if (unlikely(blk_queue_dying(rq->q))) {
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
		}
		return;
	}

	if (time_after_eq(jiffies, rq->deadline)) {
		if (!blk_mark_rq_complete(rq))
			blk_mq_rq_timed_out(rq, reserved);
	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
		data->next = rq->deadline;
		data->next_set = 1;
	}
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting for
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_mq_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		struct blk_mq_hw_ctx *hctx;

		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		if (merged)
			ctx->rq_merged++;
		return merged;
	}

	return false;
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	sbitmap_clear_bit(sb, bitnr);
	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}
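
/*
 * Worked example (illustrative): the batch size is bucketed by powers of
 * two, so queued == 1 maps to index 1, 2-3 to index 2, 4-7 to index 3,
 * and so on, capped at BLK_MQ_MAX_DISPATCH_ORDER - 1. These buckets feed
 * the per-hctx "dispatched" counters updated below.
 */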

bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait)
{
	struct blk_mq_alloc_data data = {
		.q = rq->q,
		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
	};

	if (rq->tag != -1) {
done:
		if (hctx)
			*hctx = data.hctx;
		return true;
	}

	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
		data.flags |= BLK_MQ_REQ_RESERVED;

	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag >= 0) {
		if (blk_mq_tag_busy(data.hctx)) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&data.hctx->nr_active);
		}
		data.hctx->tags->rqs[rq->tag] = rq;
		goto done;
	}

	return false;
}

static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
				    struct request *rq)
{
	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = -1;

	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
		rq->rq_flags &= ~RQF_MQ_INFLIGHT;
		atomic_dec(&hctx->nr_active);
	}
}

static void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	__blk_mq_put_driver_tag(hctx, rq);
}

static void blk_mq_put_driver_tag(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;

	if (rq->tag == -1 || rq->internal_tag == -1)
		return;

	hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
	__blk_mq_put_driver_tag(hctx, rq);
}

/*
 * If we fail getting a driver tag because all the driver tags are already
 * assigned and on the dispatch list, BUT the first entry does not have a
 * tag, then we could deadlock. For that case, move entries with assigned
 * driver tags to the front, leaving the set of tagged requests in the
 * same order, and the untagged set in the same order.
 */
static bool reorder_tags_to_front(struct list_head *list)
{
	struct request *rq, *tmp, *first = NULL;

	list_for_each_entry_safe_reverse(rq, tmp, list, queuelist) {
		if (rq == first)
			break;
		if (rq->tag != -1) {
			list_move(&rq->queuelist, list);
			if (!first)
				first = rq;
		}
	}

	return first != NULL;
}

static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
				void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	list_del(&wait->task_list);
	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
{
	struct sbq_wait_state *ws;

	/*
	 * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
	 * The thread which wins the race to grab this bit adds the hardware
	 * queue to the wait queue.
	 */
	if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
	    test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
		return false;

	init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
	ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);

	/*
	 * As soon as this returns, it's no longer safe to fiddle with
	 * hctx->dispatch_wait, since a completion can wake up the wait queue
	 * and unlock the bit.
	 */
	add_wait_queue(&ws->wait, &hctx->dispatch_wait);
	return true;
}

bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct request_queue *q = hctx->queue;
	struct request *rq;
	LIST_HEAD(driver_list);
	struct list_head *dptr;
	int queued, ret = BLK_MQ_RQ_QUEUE_OK;

	/*
	 * Start off with dptr being NULL, so we start the first request
	 * immediately, even if we have more pending.
	 */
	dptr = NULL;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	queued = 0;
	while (!list_empty(list)) {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);
		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
			if (!queued && reorder_tags_to_front(list))
				continue;

			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed.
			 */
			if (blk_mq_dispatch_wait_add(hctx)) {
				/*
				 * It's possible that a tag was freed in the
				 * window between the allocation failure and
				 * adding the hardware queue to the wait queue.
				 */
				if (!blk_mq_get_driver_tag(rq, &hctx, false))
					break;
			} else {
				break;
			}
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;
		bd.list = dptr;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			struct request *nxt;

			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
			break;
		case BLK_MQ_RQ_QUEUE_BUSY:
			blk_mq_put_driver_tag_hctx(hctx, rq);
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		default:
			pr_err("blk-mq: bad return on queue: %d\n", ret);
		case BLK_MQ_RQ_QUEUE_ERROR:
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
			break;
		}

		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
			break;

		/*
		 * We've done the first request. If we have more than 1
		 * left in the list, set dptr to defer issue.
		 */
		if (!dptr && list->next != list->prev)
			dptr = &driver_list;
	}

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		/*
		 * If we got a driver tag for the next request already,
		 * free it again.
		 */
		rq = list_first_entry(list, struct request, queuelist);
		blk_mq_put_driver_tag(rq);

		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
		 * it's possible the queue is stopped and restarted again
		 * before this. Queue restart will dispatch requests. And since
		 * requests in rq_list aren't added into hctx->dispatch yet,
		 * the requests in rq_list might get lost.
		 *
		 * blk_mq_run_hw_queue() already checks the STOPPED bit
		 *
		 * If RESTART or TAG_WAITING is set, then let completion restart
		 * the queue instead of potentially looping here.
		 */
		if (!blk_mq_sched_needs_restart(hctx) &&
		    !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
			blk_mq_run_hw_queue(hctx, true);
	}

	return queued != 0;
}
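
/*
 * Illustrative sketch (not taken from this file): the BUSY handling above
 * pairs with a hypothetical driver ->queue_rq() that reports temporary
 * resource exhaustion instead of failing the request:
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *			       const struct blk_mq_queue_data *bd)
 *	{
 *		if (!my_hw_slot_available(hctx))	// hypothetical check
 *			return BLK_MQ_RQ_QUEUE_BUSY;	// rq is requeued, run later
 *		blk_mq_start_request(bd->rq);
 *		my_hw_submit(bd->rq, bd->last);		// hypothetical submit
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */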

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu));

	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		rcu_read_lock();
		blk_mq_sched_dispatch_requests(hctx);
		rcu_read_unlock();
	} else {
		srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
		blk_mq_sched_dispatch_requests(hctx);
		srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
	}
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
		int next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	return hctx->next_cpu;
}

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (unlikely(blk_mq_hctx_stopped(hctx) ||
	    !blk_mq_hw_queue_mapped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
}

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!blk_mq_hctx_has_pending(hctx) ||
		    blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_work(&hctx->run_work);
	cancel_delayed_work(&hctx->delay_work);
	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
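
/*
 * Usage sketch (illustrative, not taken from this file): a hypothetical
 * driver pausing dispatch while its hardware resets, then resuming:
 *
 *	blk_mq_stop_hw_queues(q);	// sets BLK_MQ_S_STOPPED on each hctx
 *	my_hw_reset(dev);		// hypothetical helper
 *	blk_mq_start_stopped_hw_queues(q, true);	// clear and rerun async
 *
 * Restarting asynchronously keeps the dispatch out of the caller's
 * context, which may hold locks that queue_rq() would also need.
 */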
1254
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001255static void blk_mq_run_work_fn(struct work_struct *work)
Jens Axboe320ae512013-10-24 09:20:05 +01001256{
1257 struct blk_mq_hw_ctx *hctx;
1258
Jens Axboe27489a32016-08-24 15:54:25 -06001259 hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
Jens Axboee4043dc2014-04-09 10:18:23 -06001260
Jens Axboe320ae512013-10-24 09:20:05 +01001261 __blk_mq_run_hw_queue(hctx);
1262}
1263
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001264static void blk_mq_delay_work_fn(struct work_struct *work)
1265{
1266 struct blk_mq_hw_ctx *hctx;
1267
1268 hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
1269
1270 if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
1271 __blk_mq_run_hw_queue(hctx);
1272}
1273
1274void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1275{
Ming Lei19c66e52014-12-03 19:38:04 +08001276 if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
1277 return;
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001278
Jens Axboe7e79dad2017-01-19 07:58:59 -07001279 blk_mq_stop_hw_queue(hctx);
Christoph Hellwigb657d7e2014-11-24 09:27:23 +01001280 kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1281 &hctx->delay_work, msecs_to_jiffies(msecs));
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001282}
1283EXPORT_SYMBOL(blk_mq_delay_queue);
1284
Ming Leicfd0c552015-10-20 23:13:57 +08001285static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
Ming Leicfd0c552015-10-20 23:13:57 +08001286 struct request *rq,
1287 bool at_head)
Jens Axboe320ae512013-10-24 09:20:05 +01001288{
Jens Axboee57690f2016-08-24 15:34:35 -06001289 struct blk_mq_ctx *ctx = rq->mq_ctx;
1290
Jens Axboe01b983c2013-11-19 18:59:10 -07001291 trace_block_rq_insert(hctx->queue, rq);
1292
Christoph Hellwig72a0a362014-02-07 10:22:36 -08001293 if (at_head)
1294 list_add(&rq->queuelist, &ctx->rq_list);
1295 else
1296 list_add_tail(&rq->queuelist, &ctx->rq_list);
Ming Leicfd0c552015-10-20 23:13:57 +08001297}
Jens Axboe4bb659b2014-05-09 09:36:49 -06001298
Jens Axboe2c3ad662016-12-14 14:34:47 -07001299void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1300 bool at_head)
Ming Leicfd0c552015-10-20 23:13:57 +08001301{
1302 struct blk_mq_ctx *ctx = rq->mq_ctx;
1303
Jens Axboee57690f2016-08-24 15:34:35 -06001304 __blk_mq_insert_req_list(hctx, rq, at_head);
Jens Axboe320ae512013-10-24 09:20:05 +01001305 blk_mq_hctx_mark_pending(hctx, ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001306}
1307
Jens Axboebd166ef2017-01-17 06:03:22 -07001308void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1309 struct list_head *list)
Jens Axboe320ae512013-10-24 09:20:05 +01001310
1311{
Jens Axboe320ae512013-10-24 09:20:05 +01001312 /*
1313 * preemption doesn't flush plug list, so it's possible ctx->cpu is
1314 * offline now
1315 */
1316 spin_lock(&ctx->lock);
1317 while (!list_empty(list)) {
1318 struct request *rq;
1319
1320 rq = list_first_entry(list, struct request, queuelist);
Jens Axboee57690f2016-08-24 15:34:35 -06001321 BUG_ON(rq->mq_ctx != ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001322 list_del_init(&rq->queuelist);
Jens Axboee57690f2016-08-24 15:34:35 -06001323 __blk_mq_insert_req_list(hctx, rq, false);
Jens Axboe320ae512013-10-24 09:20:05 +01001324 }
Ming Leicfd0c552015-10-20 23:13:57 +08001325 blk_mq_hctx_mark_pending(hctx, ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001326 spin_unlock(&ctx->lock);
Jens Axboe320ae512013-10-24 09:20:05 +01001327}
1328
1329static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1330{
1331 struct request *rqa = container_of(a, struct request, queuelist);
1332 struct request *rqb = container_of(b, struct request, queuelist);
1333
1334 return !(rqa->mq_ctx < rqb->mq_ctx ||
1335 (rqa->mq_ctx == rqb->mq_ctx &&
1336 blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1337}
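/*
 * Sketch of how the comparator above is used (see blk_mq_flush_plug_list()
 * below): list_sort() orders the plugged requests by software queue first
 * and start sector second, so that runs of requests sharing an mq_ctx can
 * be handed to the scheduler as one batch:
 *
 *	list_sort(NULL, &list, plug_ctx_cmp);
 */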
1338
1339void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1340{
1341 struct blk_mq_ctx *this_ctx;
1342 struct request_queue *this_q;
1343 struct request *rq;
1344 LIST_HEAD(list);
1345 LIST_HEAD(ctx_list);
1346 unsigned int depth;
1347
1348 list_splice_init(&plug->mq_list, &list);
1349
1350 list_sort(NULL, &list, plug_ctx_cmp);
1351
1352 this_q = NULL;
1353 this_ctx = NULL;
1354 depth = 0;
1355
1356 while (!list_empty(&list)) {
1357 rq = list_entry_rq(list.next);
1358 list_del_init(&rq->queuelist);
1359 BUG_ON(!rq->q);
1360 if (rq->mq_ctx != this_ctx) {
1361 if (this_ctx) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001362 trace_block_unplug(this_q, depth, from_schedule);
1363 blk_mq_sched_insert_requests(this_q, this_ctx,
1364 &ctx_list,
1365 from_schedule);
Jens Axboe320ae512013-10-24 09:20:05 +01001366 }
1367
1368 this_ctx = rq->mq_ctx;
1369 this_q = rq->q;
1370 depth = 0;
1371 }
1372
1373 depth++;
1374 list_add_tail(&rq->queuelist, &ctx_list);
1375 }
1376
1377 /*
1378 * If 'this_ctx' is set, we know we have entries to complete
1379 * on 'ctx_list'. Do those.
1380 */
1381 if (this_ctx) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001382 trace_block_unplug(this_q, depth, from_schedule);
1383 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1384 from_schedule);
Jens Axboe320ae512013-10-24 09:20:05 +01001385 }
1386}
1387
1388static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1389{
1390 init_request_from_bio(rq, bio);
Jens Axboe4b570522014-05-29 11:00:11 -06001391
Jens Axboe6e85eaf2016-12-02 20:00:14 -07001392 blk_account_io_start(rq, true);
Jens Axboe320ae512013-10-24 09:20:05 +01001393}
1394
Jens Axboe274a5842014-08-15 12:44:08 -06001395static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1396{
1397 return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1398 !blk_queue_nomerges(hctx->queue);
1399}
1400
Jens Axboe07068d52014-05-22 10:40:51 -06001401static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1402 struct blk_mq_ctx *ctx,
1403 struct request *rq, struct bio *bio)
1404{
Ming Leie18378a2015-10-20 23:13:54 +08001405 if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
Jens Axboe07068d52014-05-22 10:40:51 -06001406 blk_mq_bio_to_request(rq, bio);
1407 spin_lock(&ctx->lock);
1408insert_rq:
1409 __blk_mq_insert_request(hctx, rq, false);
1410 spin_unlock(&ctx->lock);
1411 return false;
1412 } else {
Jens Axboe274a5842014-08-15 12:44:08 -06001413 struct request_queue *q = hctx->queue;
1414
Jens Axboe07068d52014-05-22 10:40:51 -06001415 spin_lock(&ctx->lock);
1416 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1417 blk_mq_bio_to_request(rq, bio);
1418 goto insert_rq;
1419 }
1420
1421 spin_unlock(&ctx->lock);
Jens Axboebd166ef2017-01-17 06:03:22 -07001422 __blk_mq_finish_request(hctx, ctx, rq);
Jens Axboe07068d52014-05-22 10:40:51 -06001423 return true;
1424 }
1425}
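/*
 * Descriptive note on the helper above (not in the original): a false
 * return means rq was inserted into the software queue and the caller
 * still needs to run the hardware queue; a true return means the bio was
 * merged into an existing request and rq has been freed via
 * __blk_mq_finish_request().
 */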
1426
Jens Axboefd2d3322017-01-12 10:04:45 -07001427static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1428{
Jens Axboebd166ef2017-01-17 06:03:22 -07001429 if (rq->tag != -1)
1430 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1431
1432 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
Jens Axboefd2d3322017-01-12 10:04:45 -07001433}
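/*
 * Rough sketch of the cookie layout (the authoritative encoding lives in
 * include/linux/blk_types.h): the hardware queue number occupies the high
 * bits, the driver or scheduler-internal tag the low bits, plus a flag
 * marking internal tags, roughly:
 *
 *	cookie = tag | (queue_num << BLK_QC_T_SHIFT) |
 *		 (internal ? BLK_QC_T_INTERNAL : 0);
 *
 * Polling code later unpacks the cookie to locate the right hctx.
 */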
1434
Jens Axboe066a4a72016-11-11 12:24:46 -07001435static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
Shaohua Lif984df12015-05-08 10:51:32 -07001436{
Shaohua Lif984df12015-05-08 10:51:32 -07001437 struct request_queue *q = rq->q;
Shaohua Lif984df12015-05-08 10:51:32 -07001438 struct blk_mq_queue_data bd = {
1439 .rq = rq,
1440 .list = NULL,
1441 .last = 1
1442 };
Jens Axboebd166ef2017-01-17 06:03:22 -07001443 struct blk_mq_hw_ctx *hctx;
1444 blk_qc_t new_cookie;
1445 int ret;
Shaohua Lif984df12015-05-08 10:51:32 -07001446
Jens Axboebd166ef2017-01-17 06:03:22 -07001447 if (q->elevator)
Bart Van Assche2253efc2016-10-28 17:20:02 -07001448 goto insert;
1449
Jens Axboebd166ef2017-01-17 06:03:22 -07001450 if (!blk_mq_get_driver_tag(rq, &hctx, false))
1451 goto insert;
1452
1453 new_cookie = request_to_qc_t(hctx, rq);
1454
Shaohua Lif984df12015-05-08 10:51:32 -07001455 /*
 1456	 * If queueing succeeded, we are done. On an error, kill the
 1457	 * request. For any other result (busy), just add it to our list
 1458	 * as we previously would have done.
1459 */
1460 ret = q->mq_ops->queue_rq(hctx, &bd);
Jens Axboe7b371632015-11-05 10:41:40 -07001461 if (ret == BLK_MQ_RQ_QUEUE_OK) {
1462 *cookie = new_cookie;
Bart Van Assche2253efc2016-10-28 17:20:02 -07001463 return;
Shaohua Lif984df12015-05-08 10:51:32 -07001464 }
Jens Axboe7b371632015-11-05 10:41:40 -07001465
1466 __blk_mq_requeue_request(rq);
1467
1468 if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1469 *cookie = BLK_QC_T_NONE;
1470 rq->errors = -EIO;
1471 blk_mq_end_request(rq, rq->errors);
Bart Van Assche2253efc2016-10-28 17:20:02 -07001472 return;
Jens Axboe7b371632015-11-05 10:41:40 -07001473 }
1474
Bart Van Assche2253efc2016-10-28 17:20:02 -07001475insert:
Jens Axboebd6737f2017-01-27 01:00:47 -07001476 blk_mq_sched_insert_request(rq, false, true, true, false);
Shaohua Lif984df12015-05-08 10:51:32 -07001477}
1478
Jens Axboe07068d52014-05-22 10:40:51 -06001479/*
1480 * Multiple hardware queue variant. This will not use per-process plugs,
1481 * but will attempt to bypass the hctx queueing if we can go straight to
1482 * hardware for SYNC IO.
1483 */
Jens Axboedece1632015-11-05 10:41:16 -07001484static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
Jens Axboe07068d52014-05-22 10:40:51 -06001485{
Christoph Hellwigef295ec2016-10-28 08:48:16 -06001486 const int is_sync = op_is_sync(bio->bi_opf);
Christoph Hellwigf73f44e2017-01-27 08:30:47 -07001487 const int is_flush_fua = op_is_flush(bio->bi_opf);
Jens Axboe5a797e02017-01-26 12:22:11 -07001488 struct blk_mq_alloc_data data = { .flags = 0 };
Jens Axboe07068d52014-05-22 10:40:51 -06001489 struct request *rq;
Bart Van Assche6a83e742016-11-02 10:09:51 -06001490 unsigned int request_count = 0, srcu_idx;
Shaohua Lif984df12015-05-08 10:51:32 -07001491 struct blk_plug *plug;
Shaohua Li5b3f3412015-05-08 10:51:33 -07001492 struct request *same_queue_rq = NULL;
Jens Axboe7b371632015-11-05 10:41:40 -07001493 blk_qc_t cookie;
Jens Axboe87760e52016-11-09 12:38:14 -07001494 unsigned int wb_acct;
Jens Axboe07068d52014-05-22 10:40:51 -06001495
1496 blk_queue_bounce(q, &bio);
1497
1498 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001499 bio_io_error(bio);
Jens Axboedece1632015-11-05 10:41:16 -07001500 return BLK_QC_T_NONE;
Jens Axboe07068d52014-05-22 10:40:51 -06001501 }
1502
Kent Overstreet54efd502015-04-23 22:37:18 -07001503 blk_queue_split(q, &bio, q->bio_split);
1504
Omar Sandoval87c279e2016-06-01 22:18:48 -07001505 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1506 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1507 return BLK_QC_T_NONE;
Shaohua Lif984df12015-05-08 10:51:32 -07001508
Jens Axboebd166ef2017-01-17 06:03:22 -07001509 if (blk_mq_sched_bio_merge(q, bio))
1510 return BLK_QC_T_NONE;
1511
Jens Axboe87760e52016-11-09 12:38:14 -07001512 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1513
Jens Axboebd166ef2017-01-17 06:03:22 -07001514 trace_block_getrq(q, bio, bio->bi_opf);
1515
1516 rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
Jens Axboe87760e52016-11-09 12:38:14 -07001517 if (unlikely(!rq)) {
1518 __wbt_done(q->rq_wb, wb_acct);
Jens Axboedece1632015-11-05 10:41:16 -07001519 return BLK_QC_T_NONE;
Jens Axboe87760e52016-11-09 12:38:14 -07001520 }
1521
1522 wbt_track(&rq->issue_stat, wb_acct);
Jens Axboe07068d52014-05-22 10:40:51 -06001523
Jens Axboefd2d3322017-01-12 10:04:45 -07001524 cookie = request_to_qc_t(data.hctx, rq);
Jens Axboe07068d52014-05-22 10:40:51 -06001525
1526 if (unlikely(is_flush_fua)) {
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001527 if (q->elevator)
1528 goto elv_insert;
Jens Axboe07068d52014-05-22 10:40:51 -06001529 blk_mq_bio_to_request(rq, bio);
1530 blk_insert_flush(rq);
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001531 goto run_queue;
Jens Axboe07068d52014-05-22 10:40:51 -06001532 }
1533
Shaohua Lif984df12015-05-08 10:51:32 -07001534 plug = current->plug;
Jens Axboee167dfb2014-10-29 11:18:26 -06001535 /*
 1536	 * If the driver supports deferred issue based on 'last', then
1537 * queue it up like normal since we can potentially save some
1538 * CPU this way.
1539 */
Shaohua Lif984df12015-05-08 10:51:32 -07001540 if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
1541 !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1542 struct request *old_rq = NULL;
Jens Axboe07068d52014-05-22 10:40:51 -06001543
1544 blk_mq_bio_to_request(rq, bio);
Jens Axboe07068d52014-05-22 10:40:51 -06001545
1546 /*
Bart Van Assche6a83e742016-11-02 10:09:51 -06001547 * We do limited plugging. If the bio can be merged, do that.
Shaohua Lif984df12015-05-08 10:51:32 -07001548 * Otherwise the existing request in the plug list will be
 1549	 * issued. So the plug list will have at most one request.
Jens Axboe07068d52014-05-22 10:40:51 -06001550 */
Shaohua Lif984df12015-05-08 10:51:32 -07001551 if (plug) {
Shaohua Li5b3f3412015-05-08 10:51:33 -07001552 /*
1553 * The plug list might get flushed before this. If that
Jens Axboeb094f892015-11-20 20:29:45 -07001554	 * happens, same_queue_rq is invalid and the plug list is
 1555	 * empty.
1556 */
Shaohua Li5b3f3412015-05-08 10:51:33 -07001557 if (same_queue_rq && !list_empty(&plug->mq_list)) {
1558 old_rq = same_queue_rq;
Shaohua Lif984df12015-05-08 10:51:32 -07001559 list_del_init(&old_rq->queuelist);
Jens Axboe07068d52014-05-22 10:40:51 -06001560 }
Shaohua Lif984df12015-05-08 10:51:32 -07001561 list_add_tail(&rq->queuelist, &plug->mq_list);
1562 } else /* is_sync */
1563 old_rq = rq;
1564 blk_mq_put_ctx(data.ctx);
1565 if (!old_rq)
Jens Axboe7b371632015-11-05 10:41:40 -07001566 goto done;
Bart Van Assche6a83e742016-11-02 10:09:51 -06001567
1568 if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
1569 rcu_read_lock();
Jens Axboe066a4a72016-11-11 12:24:46 -07001570 blk_mq_try_issue_directly(old_rq, &cookie);
Bart Van Assche6a83e742016-11-02 10:09:51 -06001571 rcu_read_unlock();
1572 } else {
1573 srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
Jens Axboe066a4a72016-11-11 12:24:46 -07001574 blk_mq_try_issue_directly(old_rq, &cookie);
Bart Van Assche6a83e742016-11-02 10:09:51 -06001575 srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
1576 }
Jens Axboe7b371632015-11-05 10:41:40 -07001577 goto done;
Jens Axboe07068d52014-05-22 10:40:51 -06001578 }
1579
Jens Axboebd166ef2017-01-17 06:03:22 -07001580 if (q->elevator) {
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001581elv_insert:
Jens Axboebd166ef2017-01-17 06:03:22 -07001582 blk_mq_put_ctx(data.ctx);
1583 blk_mq_bio_to_request(rq, bio);
Jens Axboe0abad772017-01-26 12:28:10 -07001584 blk_mq_sched_insert_request(rq, false, true,
Jens Axboebd6737f2017-01-27 01:00:47 -07001585 !is_sync || is_flush_fua, true);
Jens Axboebd166ef2017-01-17 06:03:22 -07001586 goto done;
1587 }
Jens Axboe07068d52014-05-22 10:40:51 -06001588 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1589 /*
1590 * For a SYNC request, send it to the hardware immediately. For
1591 * an ASYNC request, just ensure that we run it later on. The
1592 * latter allows for merging opportunities and more efficient
1593 * dispatching.
1594 */
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001595run_queue:
Jens Axboe07068d52014-05-22 10:40:51 -06001596 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1597 }
Jens Axboe07068d52014-05-22 10:40:51 -06001598 blk_mq_put_ctx(data.ctx);
Jens Axboe7b371632015-11-05 10:41:40 -07001599done:
1600 return cookie;
Jens Axboe07068d52014-05-22 10:40:51 -06001601}
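/*
 * Recap of the decision tree above (an illustrative summary, not part of
 * the original file): flush/FUA bios go to the elevator or straight to
 * blk_insert_flush(); with a scheduler attached, other requests are
 * inserted via blk_mq_sched_insert_request(); plugged or sync requests on
 * drivers without BLK_MQ_F_DEFER_ISSUE are attempted directly through
 * blk_mq_try_issue_directly(); everything else lands in the software
 * queue, and the hardware queue is run synchronously only for non-flush
 * sync requests.
 */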
1602
1603/*
1604 * Single hardware queue variant. This will attempt to use any per-process
1605 * plug for merging and IO deferral.
1606 */
Jens Axboedece1632015-11-05 10:41:16 -07001607static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
Jens Axboe07068d52014-05-22 10:40:51 -06001608{
Christoph Hellwigef295ec2016-10-28 08:48:16 -06001609 const int is_sync = op_is_sync(bio->bi_opf);
Christoph Hellwigf73f44e2017-01-27 08:30:47 -07001610 const int is_flush_fua = op_is_flush(bio->bi_opf);
Jeff Moyere6c44382015-05-08 10:51:30 -07001611 struct blk_plug *plug;
1612 unsigned int request_count = 0;
Jens Axboe5a797e02017-01-26 12:22:11 -07001613 struct blk_mq_alloc_data data = { .flags = 0 };
Jens Axboe07068d52014-05-22 10:40:51 -06001614 struct request *rq;
Jens Axboe7b371632015-11-05 10:41:40 -07001615 blk_qc_t cookie;
Jens Axboe87760e52016-11-09 12:38:14 -07001616 unsigned int wb_acct;
Jens Axboe07068d52014-05-22 10:40:51 -06001617
Jens Axboe07068d52014-05-22 10:40:51 -06001618 blk_queue_bounce(q, &bio);
1619
1620 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
Christoph Hellwig4246a0b2015-07-20 15:29:37 +02001621 bio_io_error(bio);
Jens Axboedece1632015-11-05 10:41:16 -07001622 return BLK_QC_T_NONE;
Jens Axboe07068d52014-05-22 10:40:51 -06001623 }
1624
Kent Overstreet54efd502015-04-23 22:37:18 -07001625 blk_queue_split(q, &bio, q->bio_split);
1626
Omar Sandoval87c279e2016-06-01 22:18:48 -07001627 if (!is_flush_fua && !blk_queue_nomerges(q)) {
1628 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1629 return BLK_QC_T_NONE;
1630 } else
1631 request_count = blk_plug_queued_count(q);
Jens Axboe07068d52014-05-22 10:40:51 -06001632
Jens Axboebd166ef2017-01-17 06:03:22 -07001633 if (blk_mq_sched_bio_merge(q, bio))
1634 return BLK_QC_T_NONE;
1635
Jens Axboe87760e52016-11-09 12:38:14 -07001636 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1637
Jens Axboebd166ef2017-01-17 06:03:22 -07001638 trace_block_getrq(q, bio, bio->bi_opf);
1639
1640 rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
Jens Axboe87760e52016-11-09 12:38:14 -07001641 if (unlikely(!rq)) {
1642 __wbt_done(q->rq_wb, wb_acct);
Jens Axboedece1632015-11-05 10:41:16 -07001643 return BLK_QC_T_NONE;
Jens Axboe87760e52016-11-09 12:38:14 -07001644 }
1645
1646 wbt_track(&rq->issue_stat, wb_acct);
Jens Axboe320ae512013-10-24 09:20:05 +01001647
Jens Axboefd2d3322017-01-12 10:04:45 -07001648 cookie = request_to_qc_t(data.hctx, rq);
Jens Axboe320ae512013-10-24 09:20:05 +01001649
1650 if (unlikely(is_flush_fua)) {
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001651 if (q->elevator)
1652 goto elv_insert;
Jens Axboe320ae512013-10-24 09:20:05 +01001653 blk_mq_bio_to_request(rq, bio);
Jens Axboe320ae512013-10-24 09:20:05 +01001654 blk_insert_flush(rq);
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001655 goto run_queue;
Jens Axboe320ae512013-10-24 09:20:05 +01001656 }
1657
1658 /*
 1659	 * If a task plug exists, utilize it: since this is completely
 1660	 * lockless, we can temporarily store requests there until the task
 1661	 * is either done or scheduled away.
1662 */
Jeff Moyere6c44382015-05-08 10:51:30 -07001663 plug = current->plug;
1664 if (plug) {
Shaohua Li600271d2016-11-03 17:03:54 -07001665 struct request *last = NULL;
1666
Jeff Moyere6c44382015-05-08 10:51:30 -07001667 blk_mq_bio_to_request(rq, bio);
Ming Lei0a6219a2016-11-16 18:07:05 +08001668
1669 /*
1670 * @request_count may become stale because of schedule
1671 * out, so check the list again.
1672 */
1673 if (list_empty(&plug->mq_list))
1674 request_count = 0;
Ming Lei676d0602015-10-20 23:13:56 +08001675 if (!request_count)
Jeff Moyere6c44382015-05-08 10:51:30 -07001676 trace_block_plug(q);
Shaohua Li600271d2016-11-03 17:03:54 -07001677 else
1678 last = list_entry_rq(plug->mq_list.prev);
Jens Axboeb094f892015-11-20 20:29:45 -07001679
1680 blk_mq_put_ctx(data.ctx);
1681
Shaohua Li600271d2016-11-03 17:03:54 -07001682 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1683 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
Jeff Moyere6c44382015-05-08 10:51:30 -07001684 blk_flush_plug_list(plug, false);
1685 trace_block_plug(q);
Jens Axboe320ae512013-10-24 09:20:05 +01001686 }
Jens Axboeb094f892015-11-20 20:29:45 -07001687
Jeff Moyere6c44382015-05-08 10:51:30 -07001688 list_add_tail(&rq->queuelist, &plug->mq_list);
Jens Axboe7b371632015-11-05 10:41:40 -07001689 return cookie;
Jens Axboe320ae512013-10-24 09:20:05 +01001690 }
1691
Jens Axboebd166ef2017-01-17 06:03:22 -07001692 if (q->elevator) {
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001693elv_insert:
Jens Axboebd166ef2017-01-17 06:03:22 -07001694 blk_mq_put_ctx(data.ctx);
1695 blk_mq_bio_to_request(rq, bio);
Jens Axboe0abad772017-01-26 12:28:10 -07001696 blk_mq_sched_insert_request(rq, false, true,
Jens Axboebd6737f2017-01-27 01:00:47 -07001697 !is_sync || is_flush_fua, true);
Jens Axboebd166ef2017-01-17 06:03:22 -07001698 goto done;
1699 }
Jens Axboe07068d52014-05-22 10:40:51 -06001700 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1701 /*
1702 * For a SYNC request, send it to the hardware immediately. For
1703 * an ASYNC request, just ensure that we run it later on. The
1704 * latter allows for merging opportunities and more efficient
1705 * dispatching.
1706 */
Jens Axboe0c2a6fe2017-02-17 11:38:36 -07001707run_queue:
Jens Axboe07068d52014-05-22 10:40:51 -06001708 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
Jens Axboe320ae512013-10-24 09:20:05 +01001709 }
1710
Jens Axboe07068d52014-05-22 10:40:51 -06001711 blk_mq_put_ctx(data.ctx);
Jens Axboebd166ef2017-01-17 06:03:22 -07001712done:
Jens Axboe7b371632015-11-05 10:41:40 -07001713 return cookie;
Jens Axboe320ae512013-10-24 09:20:05 +01001714}
1715
Jens Axboecc71a6f2017-01-11 14:29:56 -07001716void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1717 unsigned int hctx_idx)
Jens Axboe320ae512013-10-24 09:20:05 +01001718{
1719 struct page *page;
1720
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001721 if (tags->rqs && set->ops->exit_request) {
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001722 int i;
1723
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001724 for (i = 0; i < tags->nr_tags; i++) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001725 struct request *rq = tags->static_rqs[i];
1726
1727 if (!rq)
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001728 continue;
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001729 set->ops->exit_request(set->driver_data, rq,
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001730 hctx_idx, i);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001731 tags->static_rqs[i] = NULL;
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001732 }
1733 }
1734
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001735 while (!list_empty(&tags->page_list)) {
1736 page = list_first_entry(&tags->page_list, struct page, lru);
Dave Hansen67534712014-01-08 20:17:46 -07001737 list_del_init(&page->lru);
Catalin Marinasf75782e2015-09-14 18:16:02 +01001738 /*
 1739		 * Remove the kmemleak object previously allocated in
 1740		 * blk_mq_alloc_rqs().
1741 */
1742 kmemleak_free(page_address(page));
Jens Axboe320ae512013-10-24 09:20:05 +01001743 __free_pages(page, page->private);
1744 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07001745}
Jens Axboe320ae512013-10-24 09:20:05 +01001746
Jens Axboecc71a6f2017-01-11 14:29:56 -07001747void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1748{
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001749 kfree(tags->rqs);
Jens Axboecc71a6f2017-01-11 14:29:56 -07001750 tags->rqs = NULL;
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001751 kfree(tags->static_rqs);
1752 tags->static_rqs = NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01001753
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001754 blk_mq_free_tags(tags);
Jens Axboe320ae512013-10-24 09:20:05 +01001755}
1756
Jens Axboecc71a6f2017-01-11 14:29:56 -07001757struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1758 unsigned int hctx_idx,
1759 unsigned int nr_tags,
1760 unsigned int reserved_tags)
Jens Axboe320ae512013-10-24 09:20:05 +01001761{
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001762 struct blk_mq_tags *tags;
Shaohua Li59f082e2017-02-01 09:53:14 -08001763 int node;
Jens Axboe320ae512013-10-24 09:20:05 +01001764
Shaohua Li59f082e2017-02-01 09:53:14 -08001765 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1766 if (node == NUMA_NO_NODE)
1767 node = set->numa_node;
1768
1769 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
Shaohua Li24391c02015-01-23 14:18:00 -07001770 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001771 if (!tags)
1772 return NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01001773
Jens Axboecc71a6f2017-01-11 14:29:56 -07001774 tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001775 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
Shaohua Li59f082e2017-02-01 09:53:14 -08001776 node);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001777 if (!tags->rqs) {
1778 blk_mq_free_tags(tags);
1779 return NULL;
1780 }
Jens Axboe320ae512013-10-24 09:20:05 +01001781
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001782 tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1783 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
Shaohua Li59f082e2017-02-01 09:53:14 -08001784 node);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001785 if (!tags->static_rqs) {
1786 kfree(tags->rqs);
1787 blk_mq_free_tags(tags);
1788 return NULL;
1789 }
1790
Jens Axboecc71a6f2017-01-11 14:29:56 -07001791 return tags;
1792}
1793
1794static size_t order_to_size(unsigned int order)
1795{
1796 return (size_t)PAGE_SIZE << order;
1797}
1798
1799int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1800 unsigned int hctx_idx, unsigned int depth)
1801{
1802 unsigned int i, j, entries_per_page, max_order = 4;
1803 size_t rq_size, left;
Shaohua Li59f082e2017-02-01 09:53:14 -08001804 int node;
1805
1806 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1807 if (node == NUMA_NO_NODE)
1808 node = set->numa_node;
Jens Axboecc71a6f2017-01-11 14:29:56 -07001809
1810 INIT_LIST_HEAD(&tags->page_list);
1811
Jens Axboe320ae512013-10-24 09:20:05 +01001812 /*
1813 * rq_size is the size of the request plus driver payload, rounded
1814 * to the cacheline size
1815 */
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001816 rq_size = round_up(sizeof(struct request) + set->cmd_size,
Jens Axboe320ae512013-10-24 09:20:05 +01001817 cache_line_size());
Jens Axboecc71a6f2017-01-11 14:29:56 -07001818 left = rq_size * depth;
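	/*
	 * Worked example with hypothetical numbers: on a 64-byte cache
	 * line, sizeof(struct request) == 312 plus set->cmd_size == 72
	 * rounds up to rq_size == 384; an order-4 block (64KB) then holds
	 * 65536 / 384 = 170 requests, and a depth of 256 gives
	 * left == 384 * 256 == 98304 bytes in total.
	 */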
Jens Axboe320ae512013-10-24 09:20:05 +01001819
Jens Axboecc71a6f2017-01-11 14:29:56 -07001820 for (i = 0; i < depth; ) {
Jens Axboe320ae512013-10-24 09:20:05 +01001821 int this_order = max_order;
1822 struct page *page;
1823 int to_do;
1824 void *p;
1825
Bartlomiej Zolnierkiewiczb3a834b2016-05-16 09:54:47 -06001826 while (this_order && left < order_to_size(this_order - 1))
Jens Axboe320ae512013-10-24 09:20:05 +01001827 this_order--;
1828
1829 do {
Shaohua Li59f082e2017-02-01 09:53:14 -08001830 page = alloc_pages_node(node,
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001831 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
Jens Axboea5164402014-09-10 09:02:03 -06001832 this_order);
Jens Axboe320ae512013-10-24 09:20:05 +01001833 if (page)
1834 break;
1835 if (!this_order--)
1836 break;
1837 if (order_to_size(this_order) < rq_size)
1838 break;
1839 } while (1);
1840
1841 if (!page)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001842 goto fail;
Jens Axboe320ae512013-10-24 09:20:05 +01001843
1844 page->private = this_order;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001845 list_add_tail(&page->lru, &tags->page_list);
Jens Axboe320ae512013-10-24 09:20:05 +01001846
1847 p = page_address(page);
Catalin Marinasf75782e2015-09-14 18:16:02 +01001848 /*
1849 * Allow kmemleak to scan these pages as they contain pointers
 1850		 * to additional allocations made via ops->init_request().
1851 */
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001852 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
Jens Axboe320ae512013-10-24 09:20:05 +01001853 entries_per_page = order_to_size(this_order) / rq_size;
Jens Axboecc71a6f2017-01-11 14:29:56 -07001854 to_do = min(entries_per_page, depth - i);
Jens Axboe320ae512013-10-24 09:20:05 +01001855 left -= to_do * rq_size;
1856 for (j = 0; j < to_do; j++) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001857 struct request *rq = p;
1858
1859 tags->static_rqs[i] = rq;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001860 if (set->ops->init_request) {
1861 if (set->ops->init_request(set->driver_data,
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001862 rq, hctx_idx, i,
Shaohua Li59f082e2017-02-01 09:53:14 -08001863 node)) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001864 tags->static_rqs[i] = NULL;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001865 goto fail;
Jens Axboea5164402014-09-10 09:02:03 -06001866 }
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001867 }
1868
Jens Axboe320ae512013-10-24 09:20:05 +01001869 p += rq_size;
1870 i++;
1871 }
1872 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07001873 return 0;
Jens Axboe320ae512013-10-24 09:20:05 +01001874
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001875fail:
Jens Axboecc71a6f2017-01-11 14:29:56 -07001876 blk_mq_free_rqs(set, tags, hctx_idx);
1877 return -ENOMEM;
Jens Axboe320ae512013-10-24 09:20:05 +01001878}
1879
Jens Axboee57690f2016-08-24 15:34:35 -06001880/*
 1881 * 'cpu' is going away. Splice any existing rq_list entries from this
1882 * software queue to the hw queue dispatch list, and ensure that it
1883 * gets run.
1884 */
Thomas Gleixner9467f852016-09-22 08:05:17 -06001885static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
Jens Axboe484b4062014-05-21 14:01:15 -06001886{
Thomas Gleixner9467f852016-09-22 08:05:17 -06001887 struct blk_mq_hw_ctx *hctx;
Jens Axboe484b4062014-05-21 14:01:15 -06001888 struct blk_mq_ctx *ctx;
1889 LIST_HEAD(tmp);
1890
Thomas Gleixner9467f852016-09-22 08:05:17 -06001891 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
Jens Axboee57690f2016-08-24 15:34:35 -06001892 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
Jens Axboe484b4062014-05-21 14:01:15 -06001893
1894 spin_lock(&ctx->lock);
1895 if (!list_empty(&ctx->rq_list)) {
1896 list_splice_init(&ctx->rq_list, &tmp);
1897 blk_mq_hctx_clear_pending(hctx, ctx);
1898 }
1899 spin_unlock(&ctx->lock);
1900
1901 if (list_empty(&tmp))
Thomas Gleixner9467f852016-09-22 08:05:17 -06001902 return 0;
Jens Axboe484b4062014-05-21 14:01:15 -06001903
Jens Axboee57690f2016-08-24 15:34:35 -06001904 spin_lock(&hctx->lock);
1905 list_splice_tail_init(&tmp, &hctx->dispatch);
1906 spin_unlock(&hctx->lock);
Jens Axboe484b4062014-05-21 14:01:15 -06001907
1908 blk_mq_run_hw_queue(hctx, true);
Thomas Gleixner9467f852016-09-22 08:05:17 -06001909 return 0;
Jens Axboe484b4062014-05-21 14:01:15 -06001910}
1911
Thomas Gleixner9467f852016-09-22 08:05:17 -06001912static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
Jens Axboe484b4062014-05-21 14:01:15 -06001913{
Thomas Gleixner9467f852016-09-22 08:05:17 -06001914 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1915 &hctx->cpuhp_dead);
Jens Axboe484b4062014-05-21 14:01:15 -06001916}
1917
Ming Leic3b4afc2015-06-04 22:25:04 +08001918/* hctx->ctxs will be freed in queue's release handler */
Ming Lei08e98fc2014-09-25 23:23:38 +08001919static void blk_mq_exit_hctx(struct request_queue *q,
1920 struct blk_mq_tag_set *set,
1921 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1922{
Ming Leif70ced02014-09-25 23:23:47 +08001923 unsigned flush_start_tag = set->queue_depth;
1924
Ming Lei08e98fc2014-09-25 23:23:38 +08001925 blk_mq_tag_idle(hctx);
1926
Ming Leif70ced02014-09-25 23:23:47 +08001927 if (set->ops->exit_request)
1928 set->ops->exit_request(set->driver_data,
1929 hctx->fq->flush_rq, hctx_idx,
1930 flush_start_tag + hctx_idx);
1931
Ming Lei08e98fc2014-09-25 23:23:38 +08001932 if (set->ops->exit_hctx)
1933 set->ops->exit_hctx(hctx, hctx_idx);
1934
Bart Van Assche6a83e742016-11-02 10:09:51 -06001935 if (hctx->flags & BLK_MQ_F_BLOCKING)
1936 cleanup_srcu_struct(&hctx->queue_rq_srcu);
1937
Thomas Gleixner9467f852016-09-22 08:05:17 -06001938 blk_mq_remove_cpuhp(hctx);
Ming Leif70ced02014-09-25 23:23:47 +08001939 blk_free_flush_queue(hctx->fq);
Omar Sandoval88459642016-09-17 08:38:44 -06001940 sbitmap_free(&hctx->ctx_map);
Ming Lei08e98fc2014-09-25 23:23:38 +08001941}
1942
Ming Lei624dbe42014-05-27 23:35:13 +08001943static void blk_mq_exit_hw_queues(struct request_queue *q,
1944 struct blk_mq_tag_set *set, int nr_queue)
1945{
1946 struct blk_mq_hw_ctx *hctx;
1947 unsigned int i;
1948
1949 queue_for_each_hw_ctx(q, hctx, i) {
1950 if (i == nr_queue)
1951 break;
Ming Lei08e98fc2014-09-25 23:23:38 +08001952 blk_mq_exit_hctx(q, set, hctx, i);
Ming Lei624dbe42014-05-27 23:35:13 +08001953 }
Ming Lei624dbe42014-05-27 23:35:13 +08001954}
1955
1956static void blk_mq_free_hw_queues(struct request_queue *q,
1957 struct blk_mq_tag_set *set)
1958{
1959 struct blk_mq_hw_ctx *hctx;
1960 unsigned int i;
1961
Ming Leie09aae7e2015-01-29 20:17:27 +08001962 queue_for_each_hw_ctx(q, hctx, i)
Ming Lei624dbe42014-05-27 23:35:13 +08001963 free_cpumask_var(hctx->cpumask);
Ming Lei624dbe42014-05-27 23:35:13 +08001964}
1965
Ming Lei08e98fc2014-09-25 23:23:38 +08001966static int blk_mq_init_hctx(struct request_queue *q,
1967 struct blk_mq_tag_set *set,
1968 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1969{
1970 int node;
Ming Leif70ced02014-09-25 23:23:47 +08001971 unsigned flush_start_tag = set->queue_depth;
Ming Lei08e98fc2014-09-25 23:23:38 +08001972
1973 node = hctx->numa_node;
1974 if (node == NUMA_NO_NODE)
1975 node = hctx->numa_node = set->numa_node;
1976
Jens Axboe27489a32016-08-24 15:54:25 -06001977 INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
Ming Lei08e98fc2014-09-25 23:23:38 +08001978 INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1979 spin_lock_init(&hctx->lock);
1980 INIT_LIST_HEAD(&hctx->dispatch);
1981 hctx->queue = q;
1982 hctx->queue_num = hctx_idx;
Jeff Moyer2404e602015-11-03 10:40:06 -05001983 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
Ming Lei08e98fc2014-09-25 23:23:38 +08001984
Thomas Gleixner9467f852016-09-22 08:05:17 -06001985 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
Ming Lei08e98fc2014-09-25 23:23:38 +08001986
1987 hctx->tags = set->tags[hctx_idx];
1988
1989 /*
1990 * Allocate space for all possible cpus to avoid allocation at
1991 * runtime
1992 */
1993 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1994 GFP_KERNEL, node);
1995 if (!hctx->ctxs)
1996 goto unregister_cpu_notifier;
1997
Omar Sandoval88459642016-09-17 08:38:44 -06001998 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
1999 node))
Ming Lei08e98fc2014-09-25 23:23:38 +08002000 goto free_ctxs;
2001
2002 hctx->nr_ctx = 0;
2003
2004 if (set->ops->init_hctx &&
2005 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2006 goto free_bitmap;
2007
Ming Leif70ced02014-09-25 23:23:47 +08002008 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2009 if (!hctx->fq)
2010 goto exit_hctx;
2011
2012 if (set->ops->init_request &&
2013 set->ops->init_request(set->driver_data,
2014 hctx->fq->flush_rq, hctx_idx,
2015 flush_start_tag + hctx_idx, node))
2016 goto free_fq;
2017
Bart Van Assche6a83e742016-11-02 10:09:51 -06002018 if (hctx->flags & BLK_MQ_F_BLOCKING)
2019 init_srcu_struct(&hctx->queue_rq_srcu);
2020
Ming Lei08e98fc2014-09-25 23:23:38 +08002021 return 0;
2022
Ming Leif70ced02014-09-25 23:23:47 +08002023 free_fq:
2024 kfree(hctx->fq);
2025 exit_hctx:
2026 if (set->ops->exit_hctx)
2027 set->ops->exit_hctx(hctx, hctx_idx);
Ming Lei08e98fc2014-09-25 23:23:38 +08002028 free_bitmap:
Omar Sandoval88459642016-09-17 08:38:44 -06002029 sbitmap_free(&hctx->ctx_map);
Ming Lei08e98fc2014-09-25 23:23:38 +08002030 free_ctxs:
2031 kfree(hctx->ctxs);
2032 unregister_cpu_notifier:
Thomas Gleixner9467f852016-09-22 08:05:17 -06002033 blk_mq_remove_cpuhp(hctx);
Ming Lei08e98fc2014-09-25 23:23:38 +08002034 return -1;
2035}
2036
Jens Axboe320ae512013-10-24 09:20:05 +01002037static void blk_mq_init_cpu_queues(struct request_queue *q,
2038 unsigned int nr_hw_queues)
2039{
2040 unsigned int i;
2041
2042 for_each_possible_cpu(i) {
2043 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2044 struct blk_mq_hw_ctx *hctx;
2045
2046 memset(__ctx, 0, sizeof(*__ctx));
2047 __ctx->cpu = i;
2048 spin_lock_init(&__ctx->lock);
2049 INIT_LIST_HEAD(&__ctx->rq_list);
2050 __ctx->queue = q;
Jens Axboecf43e6b2016-11-07 21:32:37 -07002051 blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
2052 blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
Jens Axboe320ae512013-10-24 09:20:05 +01002053
2054 /* If the cpu isn't online, the cpu is mapped to first hctx */
Jens Axboe320ae512013-10-24 09:20:05 +01002055 if (!cpu_online(i))
2056 continue;
2057
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02002058 hctx = blk_mq_map_queue(q, i);
Jens Axboee4043dc2014-04-09 10:18:23 -06002059
Jens Axboe320ae512013-10-24 09:20:05 +01002060 /*
2061 * Set local node, IFF we have more than one hw queue. If
 2062		 * not, we remain on the home node of the device.
2063 */
2064 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
Raghavendra K Tbffed452015-12-02 16:59:05 +05302065 hctx->numa_node = local_memory_node(cpu_to_node(i));
Jens Axboe320ae512013-10-24 09:20:05 +01002066 }
2067}
2068
Jens Axboecc71a6f2017-01-11 14:29:56 -07002069static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2070{
2071 int ret = 0;
2072
2073 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2074 set->queue_depth, set->reserved_tags);
2075 if (!set->tags[hctx_idx])
2076 return false;
2077
2078 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2079 set->queue_depth);
2080 if (!ret)
2081 return true;
2082
2083 blk_mq_free_rq_map(set->tags[hctx_idx]);
2084 set->tags[hctx_idx] = NULL;
2085 return false;
2086}
2087
2088static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2089 unsigned int hctx_idx)
2090{
Jens Axboebd166ef2017-01-17 06:03:22 -07002091 if (set->tags[hctx_idx]) {
2092 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2093 blk_mq_free_rq_map(set->tags[hctx_idx]);
2094 set->tags[hctx_idx] = NULL;
2095 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07002096}
2097
Akinobu Mita57783222015-09-27 02:09:23 +09002098static void blk_mq_map_swqueue(struct request_queue *q,
2099 const struct cpumask *online_mask)
Jens Axboe320ae512013-10-24 09:20:05 +01002100{
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002101 unsigned int i, hctx_idx;
Jens Axboe320ae512013-10-24 09:20:05 +01002102 struct blk_mq_hw_ctx *hctx;
2103 struct blk_mq_ctx *ctx;
Ming Lei2a34c082015-04-21 10:00:20 +08002104 struct blk_mq_tag_set *set = q->tag_set;
Jens Axboe320ae512013-10-24 09:20:05 +01002105
Akinobu Mita60de0742015-09-27 02:09:25 +09002106 /*
 2107	 * Avoid others reading an incomplete hctx->cpumask through sysfs
2108 */
2109 mutex_lock(&q->sysfs_lock);
2110
Jens Axboe320ae512013-10-24 09:20:05 +01002111 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboee4043dc2014-04-09 10:18:23 -06002112 cpumask_clear(hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01002113 hctx->nr_ctx = 0;
2114 }
2115
2116 /*
2117 * Map software to hardware queues
2118 */
Thomas Gleixner897bb0c2016-03-19 11:30:33 +01002119 for_each_possible_cpu(i) {
Jens Axboe320ae512013-10-24 09:20:05 +01002120 /* If the cpu isn't online, the cpu is mapped to first hctx */
Akinobu Mita57783222015-09-27 02:09:23 +09002121 if (!cpumask_test_cpu(i, online_mask))
Jens Axboee4043dc2014-04-09 10:18:23 -06002122 continue;
2123
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002124 hctx_idx = q->mq_map[i];
2125 /* unmapped hw queue can be remapped after CPU topo changed */
Jens Axboecc71a6f2017-01-11 14:29:56 -07002126 if (!set->tags[hctx_idx] &&
2127 !__blk_mq_alloc_rq_map(set, hctx_idx)) {
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002128 /*
 2129			 * If tags initialization fails for some hctx,
 2130			 * that hctx won't be brought online. In this
 2131			 * case, remap the current ctx to hctx[0], which
 2132			 * is guaranteed to always have tags allocated.
2133 */
Jens Axboecc71a6f2017-01-11 14:29:56 -07002134 q->mq_map[i] = 0;
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002135 }
2136
Thomas Gleixner897bb0c2016-03-19 11:30:33 +01002137 ctx = per_cpu_ptr(q->queue_ctx, i);
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02002138 hctx = blk_mq_map_queue(q, i);
Keith Busch868f2f02015-12-17 17:08:14 -07002139
Jens Axboee4043dc2014-04-09 10:18:23 -06002140 cpumask_set_cpu(i, hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01002141 ctx->index_hw = hctx->nr_ctx;
2142 hctx->ctxs[hctx->nr_ctx++] = ctx;
2143 }
Jens Axboe506e9312014-05-07 10:26:44 -06002144
Akinobu Mita60de0742015-09-27 02:09:25 +09002145 mutex_unlock(&q->sysfs_lock);
2146
Jens Axboe506e9312014-05-07 10:26:44 -06002147 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboe484b4062014-05-21 14:01:15 -06002148 /*
Jens Axboea68aafa2014-08-15 13:19:15 -06002149 * If no software queues are mapped to this hardware queue,
2150 * disable it and free the request entries.
Jens Axboe484b4062014-05-21 14:01:15 -06002151 */
2152 if (!hctx->nr_ctx) {
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002153 /* Never unmap queue 0. We need it as a
 2154			 * fallback in case a new remap fails
 2155			 * allocation.
2156 */
Jens Axboecc71a6f2017-01-11 14:29:56 -07002157 if (i && set->tags[i])
2158 blk_mq_free_map_and_requests(set, i);
2159
Ming Lei2a34c082015-04-21 10:00:20 +08002160 hctx->tags = NULL;
Jens Axboe484b4062014-05-21 14:01:15 -06002161 continue;
2162 }
2163
Ming Lei2a34c082015-04-21 10:00:20 +08002164 hctx->tags = set->tags[i];
2165 WARN_ON(!hctx->tags);
2166
Jens Axboe484b4062014-05-21 14:01:15 -06002167 /*
Chong Yuan889fa312015-04-15 11:39:29 -06002168 * Set the map size to the number of mapped software queues.
2169 * This is more accurate and more efficient than looping
2170 * over all possibly mapped software queues.
2171 */
Omar Sandoval88459642016-09-17 08:38:44 -06002172 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
Chong Yuan889fa312015-04-15 11:39:29 -06002173
2174 /*
Jens Axboe484b4062014-05-21 14:01:15 -06002175 * Initialize batch roundrobin counts
2176 */
Jens Axboe506e9312014-05-07 10:26:44 -06002177 hctx->next_cpu = cpumask_first(hctx->cpumask);
2178 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2179 }
Jens Axboe320ae512013-10-24 09:20:05 +01002180}
2181
Jeff Moyer2404e602015-11-03 10:40:06 -05002182static void queue_set_hctx_shared(struct request_queue *q, bool shared)
Jens Axboe0d2602c2014-05-13 15:10:52 -06002183{
2184 struct blk_mq_hw_ctx *hctx;
Jens Axboe0d2602c2014-05-13 15:10:52 -06002185 int i;
2186
Jeff Moyer2404e602015-11-03 10:40:06 -05002187 queue_for_each_hw_ctx(q, hctx, i) {
2188 if (shared)
2189 hctx->flags |= BLK_MQ_F_TAG_SHARED;
2190 else
2191 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2192 }
2193}
2194
2195static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
2196{
2197 struct request_queue *q;
Jens Axboe0d2602c2014-05-13 15:10:52 -06002198
2199 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2200 blk_mq_freeze_queue(q);
Jeff Moyer2404e602015-11-03 10:40:06 -05002201 queue_set_hctx_shared(q, shared);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002202 blk_mq_unfreeze_queue(q);
2203 }
2204}
2205
2206static void blk_mq_del_queue_tag_set(struct request_queue *q)
2207{
2208 struct blk_mq_tag_set *set = q->tag_set;
2209
Jens Axboe0d2602c2014-05-13 15:10:52 -06002210 mutex_lock(&set->tag_list_lock);
2211 list_del_init(&q->tag_set_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05002212 if (list_is_singular(&set->tag_list)) {
2213 /* just transitioned to unshared */
2214 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2215 /* update existing queue */
2216 blk_mq_update_tag_set_depth(set, false);
2217 }
Jens Axboe0d2602c2014-05-13 15:10:52 -06002218 mutex_unlock(&set->tag_list_lock);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002219}
2220
2221static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2222 struct request_queue *q)
2223{
2224 q->tag_set = set;
2225
2226 mutex_lock(&set->tag_list_lock);
Jeff Moyer2404e602015-11-03 10:40:06 -05002227
2228 /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2229 if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2230 set->flags |= BLK_MQ_F_TAG_SHARED;
2231 /* update existing queue */
2232 blk_mq_update_tag_set_depth(set, true);
2233 }
2234 if (set->flags & BLK_MQ_F_TAG_SHARED)
2235 queue_set_hctx_shared(q, true);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002236 list_add_tail(&q->tag_set_list, &set->tag_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05002237
Jens Axboe0d2602c2014-05-13 15:10:52 -06002238 mutex_unlock(&set->tag_list_lock);
2239}
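/*
 * Illustrative timeline for the two helpers above: while a tag set serves
 * a single queue, BLK_MQ_F_TAG_SHARED stays clear. Adding a second queue
 * sets the flag on the set, and blk_mq_update_tag_set_depth() freezes
 * each existing queue, flips the per-hctx flag, and unfreezes, so fair
 * tag sharing is in effect before the new queue issues its first request;
 * deleting back down to one queue reverses the transition.
 */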
2240
Ming Leie09aae7e2015-01-29 20:17:27 +08002241/*
 2242 * This is the actual release handler for mq, but we do it from the
 2243 * request queue's release handler to avoid use-after-free and other
 2244 * headaches; q->mq_kobj shouldn't have been introduced, but we can't
 2245 * group the ctx/hctx kobjects without it.
2246 */
2247void blk_mq_release(struct request_queue *q)
2248{
2249 struct blk_mq_hw_ctx *hctx;
2250 unsigned int i;
2251
Jens Axboebd166ef2017-01-17 06:03:22 -07002252 blk_mq_sched_teardown(q);
2253
Ming Leie09aae7e2015-01-29 20:17:27 +08002254 /* hctx kobj stays in hctx */
Ming Leic3b4afc2015-06-04 22:25:04 +08002255 queue_for_each_hw_ctx(q, hctx, i) {
2256 if (!hctx)
2257 continue;
2258 kfree(hctx->ctxs);
Ming Leie09aae7e2015-01-29 20:17:27 +08002259 kfree(hctx);
Ming Leic3b4afc2015-06-04 22:25:04 +08002260 }
Ming Leie09aae7e2015-01-29 20:17:27 +08002261
Akinobu Mitaa723bab2015-09-27 02:09:21 +09002262 q->mq_map = NULL;
2263
Ming Leie09aae7e2015-01-29 20:17:27 +08002264 kfree(q->queue_hw_ctx);
2265
2266 /* ctx kobj stays in queue_ctx */
2267 free_percpu(q->queue_ctx);
2268}
2269
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002270struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
Jens Axboe320ae512013-10-24 09:20:05 +01002271{
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002272 struct request_queue *uninit_q, *q;
2273
2274 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2275 if (!uninit_q)
2276 return ERR_PTR(-ENOMEM);
2277
2278 q = blk_mq_init_allocated_queue(set, uninit_q);
2279 if (IS_ERR(q))
2280 blk_cleanup_queue(uninit_q);
2281
2282 return q;
2283}
2284EXPORT_SYMBOL(blk_mq_init_queue);
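/*
 * Hypothetical driver-side sketch of the usual init sequence, with
 * made-up names (my_mq_ops, my_dev, struct my_cmd):
 *
 *	struct blk_mq_tag_set *set = &my_dev->tag_set;
 *
 *	set->ops = &my_mq_ops;
 *	set->nr_hw_queues = 1;
 *	set->queue_depth = 64;
 *	set->numa_node = NUMA_NO_NODE;
 *	set->cmd_size = sizeof(struct my_cmd);	// per-request payload
 *	if (blk_mq_alloc_tag_set(set))
 *		goto fail;
 *	q = blk_mq_init_queue(set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(set);
 *		goto fail;
 *	}
 */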
2285
Keith Busch868f2f02015-12-17 17:08:14 -07002286static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2287 struct request_queue *q)
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002288{
Keith Busch868f2f02015-12-17 17:08:14 -07002289 int i, j;
2290 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
Jens Axboe320ae512013-10-24 09:20:05 +01002291
Keith Busch868f2f02015-12-17 17:08:14 -07002292 blk_mq_sysfs_unregister(q);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002293 for (i = 0; i < set->nr_hw_queues; i++) {
Keith Busch868f2f02015-12-17 17:08:14 -07002294 int node;
Jens Axboef14bbe72014-05-27 12:06:53 -06002295
Keith Busch868f2f02015-12-17 17:08:14 -07002296 if (hctxs[i])
2297 continue;
2298
2299 node = blk_mq_hw_queue_to_node(q->mq_map, i);
Christoph Hellwigcdef54d2014-05-28 18:11:06 +02002300 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
2301 GFP_KERNEL, node);
Jens Axboe320ae512013-10-24 09:20:05 +01002302 if (!hctxs[i])
Keith Busch868f2f02015-12-17 17:08:14 -07002303 break;
Jens Axboe320ae512013-10-24 09:20:05 +01002304
Jens Axboea86073e2014-10-13 15:41:54 -06002305 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
Keith Busch868f2f02015-12-17 17:08:14 -07002306 node)) {
2307 kfree(hctxs[i]);
2308 hctxs[i] = NULL;
2309 break;
2310 }
Jens Axboee4043dc2014-04-09 10:18:23 -06002311
Jens Axboe0d2602c2014-05-13 15:10:52 -06002312 atomic_set(&hctxs[i]->nr_active, 0);
Jens Axboef14bbe72014-05-27 12:06:53 -06002313 hctxs[i]->numa_node = node;
Jens Axboe320ae512013-10-24 09:20:05 +01002314 hctxs[i]->queue_num = i;
Keith Busch868f2f02015-12-17 17:08:14 -07002315
2316 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2317 free_cpumask_var(hctxs[i]->cpumask);
2318 kfree(hctxs[i]);
2319 hctxs[i] = NULL;
2320 break;
2321 }
2322 blk_mq_hctx_kobj_init(hctxs[i]);
Jens Axboe320ae512013-10-24 09:20:05 +01002323 }
Keith Busch868f2f02015-12-17 17:08:14 -07002324 for (j = i; j < q->nr_hw_queues; j++) {
2325 struct blk_mq_hw_ctx *hctx = hctxs[j];
2326
2327 if (hctx) {
Jens Axboecc71a6f2017-01-11 14:29:56 -07002328 if (hctx->tags)
2329 blk_mq_free_map_and_requests(set, j);
Keith Busch868f2f02015-12-17 17:08:14 -07002330 blk_mq_exit_hctx(q, set, hctx, j);
2331 free_cpumask_var(hctx->cpumask);
2332 kobject_put(&hctx->kobj);
2333 kfree(hctx->ctxs);
2334 kfree(hctx);
2335 hctxs[j] = NULL;
2336
2337 }
2338 }
2339 q->nr_hw_queues = i;
2340 blk_mq_sysfs_register(q);
2341}
2342
2343struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2344 struct request_queue *q)
2345{
Ming Lei66841672016-02-12 15:27:00 +08002346 /* mark the queue as mq asap */
2347 q->mq_ops = set->ops;
2348
Keith Busch868f2f02015-12-17 17:08:14 -07002349 q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2350 if (!q->queue_ctx)
Ming Linc7de5722016-05-25 23:23:27 -07002351 goto err_exit;
Keith Busch868f2f02015-12-17 17:08:14 -07002352
2353 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2354 GFP_KERNEL, set->numa_node);
2355 if (!q->queue_hw_ctx)
2356 goto err_percpu;
2357
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002358 q->mq_map = set->mq_map;
Keith Busch868f2f02015-12-17 17:08:14 -07002359
2360 blk_mq_realloc_hw_ctxs(set, q);
2361 if (!q->nr_hw_queues)
2362 goto err_hctxs;
Jens Axboe320ae512013-10-24 09:20:05 +01002363
Christoph Hellwig287922e2015-10-30 20:57:30 +08002364 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
Ming Leie56f6982015-07-16 19:53:22 +08002365 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
Jens Axboe320ae512013-10-24 09:20:05 +01002366
2367 q->nr_queues = nr_cpu_ids;
Jens Axboe320ae512013-10-24 09:20:05 +01002368
Jens Axboe94eddfb2013-11-19 09:25:07 -07002369 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
Jens Axboe320ae512013-10-24 09:20:05 +01002370
Jens Axboe05f1dd52014-05-29 09:53:32 -06002371 if (!(set->flags & BLK_MQ_F_SG_MERGE))
2372 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2373
Christoph Hellwig1be036e2014-02-07 10:22:39 -08002374 q->sg_reserved_size = INT_MAX;
2375
Mike Snitzer28494502016-09-14 13:28:30 -04002376 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
Christoph Hellwig6fca6a62014-05-28 08:08:02 -06002377 INIT_LIST_HEAD(&q->requeue_list);
2378 spin_lock_init(&q->requeue_lock);
2379
Jens Axboe07068d52014-05-22 10:40:51 -06002380 if (q->nr_hw_queues > 1)
2381 blk_queue_make_request(q, blk_mq_make_request);
2382 else
2383 blk_queue_make_request(q, blk_sq_make_request);
2384
Jens Axboeeba71762014-05-20 15:17:27 -06002385 /*
2386 * Do this after blk_queue_make_request() overrides it...
2387 */
2388 q->nr_requests = set->queue_depth;
2389
Jens Axboe64f1c212016-11-14 13:03:03 -07002390 /*
2391 * Default to classic polling
2392 */
2393 q->poll_nsec = -1;
2394
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002395 if (set->ops->complete)
2396 blk_queue_softirq_done(q, set->ops->complete);
Christoph Hellwig30a91cb2014-02-10 03:24:38 -08002397
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002398 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
Jens Axboe320ae512013-10-24 09:20:05 +01002399
Akinobu Mita57783222015-09-27 02:09:23 +09002400 get_online_cpus();
Jens Axboe320ae512013-10-24 09:20:05 +01002401 mutex_lock(&all_q_mutex);
Akinobu Mita4593fdb2015-09-27 02:09:20 +09002402
Jens Axboe320ae512013-10-24 09:20:05 +01002403 list_add_tail(&q->all_q_node, &all_q_list);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002404 blk_mq_add_queue_tag_set(set, q);
Akinobu Mita57783222015-09-27 02:09:23 +09002405 blk_mq_map_swqueue(q, cpu_online_mask);
Jens Axboe484b4062014-05-21 14:01:15 -06002406
Akinobu Mita4593fdb2015-09-27 02:09:20 +09002407 mutex_unlock(&all_q_mutex);
Akinobu Mita57783222015-09-27 02:09:23 +09002408 put_online_cpus();
Akinobu Mita4593fdb2015-09-27 02:09:20 +09002409
Jens Axboed3484992017-01-13 14:43:58 -07002410 if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2411 int ret;
2412
2413 ret = blk_mq_sched_init(q);
2414 if (ret)
2415 return ERR_PTR(ret);
2416 }
2417
Jens Axboe320ae512013-10-24 09:20:05 +01002418 return q;
Christoph Hellwig18741982014-02-10 09:29:00 -07002419
Jens Axboe320ae512013-10-24 09:20:05 +01002420err_hctxs:
Keith Busch868f2f02015-12-17 17:08:14 -07002421 kfree(q->queue_hw_ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01002422err_percpu:
Keith Busch868f2f02015-12-17 17:08:14 -07002423 free_percpu(q->queue_ctx);
Ming Linc7de5722016-05-25 23:23:27 -07002424err_exit:
2425 q->mq_ops = NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01002426 return ERR_PTR(-ENOMEM);
2427}
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002428EXPORT_SYMBOL(blk_mq_init_allocated_queue);
Jens Axboe320ae512013-10-24 09:20:05 +01002429
2430void blk_mq_free_queue(struct request_queue *q)
2431{
Ming Lei624dbe42014-05-27 23:35:13 +08002432 struct blk_mq_tag_set *set = q->tag_set;
Jens Axboe320ae512013-10-24 09:20:05 +01002433
Akinobu Mita0e626362015-09-27 02:09:22 +09002434 mutex_lock(&all_q_mutex);
2435 list_del_init(&q->all_q_node);
2436 mutex_unlock(&all_q_mutex);
2437
Jens Axboe87760e52016-11-09 12:38:14 -07002438 wbt_exit(q);
2439
Jens Axboe0d2602c2014-05-13 15:10:52 -06002440 blk_mq_del_queue_tag_set(q);
2441
Ming Lei624dbe42014-05-27 23:35:13 +08002442 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2443 blk_mq_free_hw_queues(q, set);
Jens Axboe320ae512013-10-24 09:20:05 +01002444}
Jens Axboe320ae512013-10-24 09:20:05 +01002445
2446/* Basically redo blk_mq_init_queue with queue frozen */
Akinobu Mita57783222015-09-27 02:09:23 +09002447static void blk_mq_queue_reinit(struct request_queue *q,
2448 const struct cpumask *online_mask)
Jens Axboe320ae512013-10-24 09:20:05 +01002449{
Christoph Hellwig4ecd4fe2015-05-07 09:38:13 +02002450 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
Jens Axboe320ae512013-10-24 09:20:05 +01002451
Jens Axboe67aec142014-05-30 08:25:36 -06002452 blk_mq_sysfs_unregister(q);
2453
Jens Axboe320ae512013-10-24 09:20:05 +01002454 /*
 2455	 * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
 2456	 * we should change hctx numa_node according to the new topology (this
 2457	 * involves freeing and re-allocating memory, worth doing?)
2458 */
2459
Akinobu Mita57783222015-09-27 02:09:23 +09002460 blk_mq_map_swqueue(q, online_mask);
Jens Axboe320ae512013-10-24 09:20:05 +01002461
Jens Axboe67aec142014-05-30 08:25:36 -06002462 blk_mq_sysfs_register(q);
Jens Axboe320ae512013-10-24 09:20:05 +01002463}
2464
Sebastian Andrzej Siewior65d52912016-09-22 08:05:19 -06002465/*
2466 * New online cpumask which is going to be set in this hotplug event.
 2467 * Declare this cpumask as global, as cpu-hotplug operations are invoked
 2468 * one by one and dynamically allocating it could result in a failure.
2469 */
2470static struct cpumask cpuhp_online_new;
2471
2472static void blk_mq_queue_reinit_work(void)
Jens Axboe320ae512013-10-24 09:20:05 +01002473{
2474 struct request_queue *q;
Jens Axboe320ae512013-10-24 09:20:05 +01002475
2476 mutex_lock(&all_q_mutex);
Tejun Heof3af0202014-11-04 13:52:27 -05002477 /*
2478 * We need to freeze and reinit all existing queues. Freezing
2479 * involves synchronous wait for an RCU grace period and doing it
2480 * one by one may take a long time. Start freezing all queues in
2481 * one swoop and then wait for the completions so that freezing can
2482 * take place in parallel.
2483 */
2484 list_for_each_entry(q, &all_q_list, all_q_node)
2485 blk_mq_freeze_queue_start(q);
Gabriel Krisman Bertazi415d3da2016-11-28 15:01:48 -02002486 list_for_each_entry(q, &all_q_list, all_q_node)
Tejun Heof3af0202014-11-04 13:52:27 -05002487 blk_mq_freeze_queue_wait(q);
2488
Jens Axboe320ae512013-10-24 09:20:05 +01002489 list_for_each_entry(q, &all_q_list, all_q_node)
Sebastian Andrzej Siewior65d52912016-09-22 08:05:19 -06002490 blk_mq_queue_reinit(q, &cpuhp_online_new);
Tejun Heof3af0202014-11-04 13:52:27 -05002491
2492 list_for_each_entry(q, &all_q_list, all_q_node)
2493 blk_mq_unfreeze_queue(q);
2494
Jens Axboe320ae512013-10-24 09:20:05 +01002495 mutex_unlock(&all_q_mutex);
Sebastian Andrzej Siewior65d52912016-09-22 08:05:19 -06002496}
2497
2498static int blk_mq_queue_reinit_dead(unsigned int cpu)
2499{
Sebastian Andrzej Siewior97a32862016-09-23 15:02:38 +02002500 cpumask_copy(&cpuhp_online_new, cpu_online_mask);
Sebastian Andrzej Siewior65d52912016-09-22 08:05:19 -06002501 blk_mq_queue_reinit_work();
2502 return 0;
2503}
2504
2505/*
 2506 * Before a hot-added cpu starts handling requests, new mappings must
 2507 * be established. Otherwise, requests in the hw queue might never be
 2508 * dispatched.
2509 *
2510 * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
2511 * for CPU0, and ctx1 for CPU1).
2512 *
 2513 * Now CPU1 is onlined and a request is inserted into ctx1->rq_list,
 2514 * setting bit0 in the pending bitmap, as ctx1->index_hw is still zero.
2515 *
Jens Axboe2c3ad662016-12-14 14:34:47 -07002516 * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set
2517 * in pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
 2518 * But hctx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is
2519 * ignored.
Sebastian Andrzej Siewior65d52912016-09-22 08:05:19 -06002520 */
2521static int blk_mq_queue_reinit_prepare(unsigned int cpu)
2522{
2523 cpumask_copy(&cpuhp_online_new, cpu_online_mask);
2524 cpumask_set_cpu(cpu, &cpuhp_online_new);
2525 blk_mq_queue_reinit_work();
2526 return 0;
Jens Axboe320ae512013-10-24 09:20:05 +01002527}
2528
Jens Axboea5164402014-09-10 09:02:03 -06002529static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2530{
2531 int i;
2532
Jens Axboecc71a6f2017-01-11 14:29:56 -07002533 for (i = 0; i < set->nr_hw_queues; i++)
2534 if (!__blk_mq_alloc_rq_map(set, i))
Jens Axboea5164402014-09-10 09:02:03 -06002535 goto out_unwind;
Jens Axboea5164402014-09-10 09:02:03 -06002536
2537 return 0;
2538
2539out_unwind:
2540 while (--i >= 0)
Jens Axboecc71a6f2017-01-11 14:29:56 -07002541 blk_mq_free_rq_map(set->tags[i]);
Jens Axboea5164402014-09-10 09:02:03 -06002542
Jens Axboea5164402014-09-10 09:02:03 -06002543 return -ENOMEM;
2544}
2545
2546/*
2547 * Allocate the request maps associated with this tag_set. Note that this
2548 * may reduce the depth asked for, if memory is tight. set->queue_depth
2549 * will be updated to reflect the allocated depth.
2550 */
2551static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2552{
2553 unsigned int depth;
2554 int err;
2555
2556 depth = set->queue_depth;
2557 do {
2558 err = __blk_mq_alloc_rq_maps(set);
2559 if (!err)
2560 break;
2561
2562 set->queue_depth >>= 1;
2563 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2564 err = -ENOMEM;
2565 break;
2566 }
2567 } while (set->queue_depth);
2568
2569 if (!set->queue_depth || err) {
2570 pr_err("blk-mq: failed to allocate request map\n");
2571 return -ENOMEM;
2572 }
2573
2574 if (depth != set->queue_depth)
2575 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2576 depth, set->queue_depth);
2577
2578 return 0;
2579}
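/*
 * Illustrative note on the retry loop above: a set created with
 * queue_depth 1024 whose allocations keep failing is retried at 512,
 * 256, ... until either an allocation succeeds or the depth would drop
 * below set->reserved_tags + BLK_MQ_TAG_MIN; the reduced depth is then
 * reported so drivers can see what they actually got.
 */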
2580
Jens Axboea4391c62014-06-05 15:21:56 -06002581/*
2582 * Alloc a tag set to be associated with one or more request queues.
2583 * May fail with EINVAL for various error conditions. May adjust the
 2584 * requested depth down, if it is too large. In that case, the set
2585 * value will be stored in set->queue_depth.
2586 */
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
        int ret;

        BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);

        if (!set->nr_hw_queues)
                return -EINVAL;
        if (!set->queue_depth)
                return -EINVAL;
        if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
                return -EINVAL;

        if (!set->ops->queue_rq)
                return -EINVAL;

        if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
                pr_info("blk-mq: reduced tag depth to %u\n",
                        BLK_MQ_MAX_DEPTH);
                set->queue_depth = BLK_MQ_MAX_DEPTH;
        }

        /*
         * If a crashdump is active, then we are potentially in a very
         * memory constrained environment. Limit us to 1 queue and
         * 64 tags to prevent using too much memory.
         */
        if (is_kdump_kernel()) {
                set->nr_hw_queues = 1;
                set->queue_depth = min(64U, set->queue_depth);
        }
        /*
         * There is no use for more h/w queues than cpus.
         */
        if (set->nr_hw_queues > nr_cpu_ids)
                set->nr_hw_queues = nr_cpu_ids;

        set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
                                 GFP_KERNEL, set->numa_node);
        if (!set->tags)
                return -ENOMEM;

        ret = -ENOMEM;
        set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
                                   GFP_KERNEL, set->numa_node);
        if (!set->mq_map)
                goto out_free_tags;

        if (set->ops->map_queues)
                ret = set->ops->map_queues(set);
        else
                ret = blk_mq_map_queues(set);
        if (ret)
                goto out_free_mq_map;

        ret = blk_mq_alloc_rq_maps(set);
        if (ret)
                goto out_free_mq_map;

        mutex_init(&set->tag_list_lock);
        INIT_LIST_HEAD(&set->tag_list);

        return 0;

out_free_mq_map:
        kfree(set->mq_map);
        set->mq_map = NULL;
out_free_tags:
        kfree(set->tags);
        set->tags = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);
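
/*
 * A minimal sketch of the expected driver-side usage (hedged: my_mq_ops,
 * struct my_cmd and the error handling are illustrative placeholders, not
 * taken from a real driver):
 *
 *      static struct blk_mq_tag_set set;
 *      struct request_queue *q;
 *      int ret;
 *
 *      set.ops = &my_mq_ops;                   (driver-defined blk_mq_ops)
 *      set.nr_hw_queues = 1;
 *      set.queue_depth = 64;
 *      set.numa_node = NUMA_NO_NODE;
 *      set.cmd_size = sizeof(struct my_cmd);   (per-request driver payload)
 *      set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *      ret = blk_mq_alloc_tag_set(&set);
 *      if (ret)
 *              return ret;
 *      q = blk_mq_init_queue(&set);
 *      if (IS_ERR(q)) {
 *              blk_mq_free_tag_set(&set);
 *              return PTR_ERR(q);
 *      }
 *
 * Teardown mirrors this: blk_cleanup_queue(q), then blk_mq_free_tag_set().
 */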

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
        int i;

        for (i = 0; i < nr_cpu_ids; i++)
                blk_mq_free_map_and_requests(set, i);

        kfree(set->mq_map);
        set->mq_map = NULL;

        kfree(set->tags);
        set->tags = NULL;
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
        struct blk_mq_tag_set *set = q->tag_set;
        struct blk_mq_hw_ctx *hctx;
        int i, ret;

        if (!set)
                return -EINVAL;

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        ret = 0;
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx->tags)
                        continue;
                /*
                 * If we're using an MQ scheduler, just update the scheduler
                 * queue depth. This is similar to what the old code would do.
                 */
                if (!hctx->sched_tags) {
                        ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
                                                        min(nr, set->queue_depth),
                                                        false);
                } else {
                        ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
                                                        nr, true);
                }
                if (ret)
                        break;
        }

        if (!ret)
                q->nr_requests = nr;

        blk_mq_unfreeze_queue(q);
        blk_mq_start_stopped_hw_queues(q, true);

        return ret;
}
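
/*
 * Context note (an assumption about the sysfs plumbing, see
 * queue_requests_store() in blk-sysfs.c): on a blk-mq device, writing
 * /sys/block/<dev>/queue/nr_requests lands here, e.g.
 *
 *      echo 128 > /sys/block/nvme0n1/queue/nr_requests
 *
 * which resizes hctx->tags (capped at min(nr, set->queue_depth)) or, when
 * an IO scheduler owns the tags, resizes hctx->sched_tags to the full nr.
 */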

void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
{
        struct request_queue *q;

        if (nr_hw_queues > nr_cpu_ids)
                nr_hw_queues = nr_cpu_ids;
        if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
                return;

        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_freeze_queue(q);

        set->nr_hw_queues = nr_hw_queues;
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_realloc_hw_ctxs(set, q);

                /*
                 * Manually set the make_request_fn as blk_queue_make_request
                 * resets a lot of the queue settings.
                 */
                if (q->nr_hw_queues > 1)
                        q->make_request_fn = blk_mq_make_request;
                else
                        q->make_request_fn = blk_sq_make_request;

                blk_mq_queue_reinit(q, cpu_online_mask);
        }

        list_for_each_entry(q, &set->tag_list, tag_set_list)
                blk_mq_unfreeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
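
/*
 * Illustrative caller (hedged; "dev" is a made-up driver structure): a
 * driver whose controller returns from reset with a different hardware
 * queue count can call
 *
 *      blk_mq_update_nr_hw_queues(&dev->tag_set, dev->online_queues);
 *
 * and every queue sharing the tag set is frozen, re-mapped to the new hctx
 * count and unfrozen, so concurrent submitters never observe a half-updated
 * mapping.
 */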

static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
                                       struct blk_mq_hw_ctx *hctx,
                                       struct request *rq)
{
        struct blk_rq_stat stat[2];
        unsigned long ret = 0;

        /*
         * If stats collection isn't on, don't sleep but turn it on for
         * future users.
         */
        if (!blk_stat_enable(q))
                return 0;

        /*
         * We don't have to do this once per IO; this should be optimized
         * to reuse the current stats window until it changes.
         */
        memset(&stat, 0, sizeof(stat));
        blk_hctx_stat_get(hctx, stat);

        /*
         * As an optimistic guess, use half of the mean service time
         * for this type of request. We can (and should) make this smarter.
         * For instance, if the completion latencies are tight, we can
         * get closer than just half the mean. This is especially
         * important on devices where the completion latencies are longer
         * than ~10 usec.
         */
        if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
                ret = (stat[BLK_STAT_READ].mean + 1) / 2;
        else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
                ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;

        return ret;
}
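
/*
 * Worked example of the half-of-mean guess (numbers are illustrative):
 * if completed reads in the current stats window have a mean service time
 * of 8000 nsec, a polled read first sleeps for (8000 + 1) / 2 = 4000 nsec
 * before entering the busy-poll loop.
 */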

static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
                                     struct blk_mq_hw_ctx *hctx,
                                     struct request *rq)
{
        struct hrtimer_sleeper hs;
        enum hrtimer_mode mode;
        unsigned int nsecs;
        ktime_t kt;

        if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
                return false;

        /*
         * poll_nsec can be:
         *
         * -1:  don't ever hybrid sleep
         *  0:  use half of prev avg
         * >0:  use this specific value
         */
        if (q->poll_nsec == -1)
                return false;
        else if (q->poll_nsec > 0)
                nsecs = q->poll_nsec;
        else
                nsecs = blk_mq_poll_nsecs(q, hctx, rq);

        if (!nsecs)
                return false;

        set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);

        /*
         * This will be replaced with the stats tracking code, using
         * 'avg_completion_time / 2' as the pre-sleep target.
         */
        kt = nsecs;

        mode = HRTIMER_MODE_REL;
        hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
        hrtimer_set_expires(&hs.timer, kt);

        hrtimer_init_sleeper(&hs, current);
        do {
                if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
                        break;
                set_current_state(TASK_UNINTERRUPTIBLE);
                hrtimer_start_expires(&hs.timer, mode);
                if (hs.task)
                        io_schedule();
                hrtimer_cancel(&hs.timer);
                mode = HRTIMER_MODE_ABS;
        } while (hs.task && !signal_pending(current));

        __set_current_state(TASK_RUNNING);
        destroy_hrtimer_on_stack(&hs.timer);
        return true;
}
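
/*
 * q->poll_nsec is user-controllable; assuming the 4.10-era sysfs plumbing
 * (queue_poll_delay_store() in blk-sysfs.c, which takes usec and stores
 * nsec), the three cases above map to e.g.:
 *
 *      echo -1 > /sys/block/nvme0n1/queue/io_poll_delay  (never hybrid sleep)
 *      echo  0 > /sys/block/nvme0n1/queue/io_poll_delay  (adaptive: half of mean)
 *      echo  4 > /sys/block/nvme0n1/queue/io_poll_delay  (fixed 4 usec sleep)
 */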

static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
        struct request_queue *q = hctx->queue;
        long state;

        /*
         * If we sleep, have the caller restart the poll loop to reset
         * the state. Like for the other success return cases, the
         * caller is responsible for checking if the IO completed. If
         * the IO isn't complete, we'll get called again and will go
         * straight to the busy poll loop.
         */
        if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
                return true;

        hctx->poll_considered++;

        state = current->state;
        while (!need_resched()) {
                int ret;

                hctx->poll_invoked++;

                ret = q->mq_ops->poll(hctx, rq->tag);
                if (ret > 0) {
                        hctx->poll_success++;
                        set_current_state(TASK_RUNNING);
                        return true;
                }

                if (signal_pending_state(state, current))
                        set_current_state(TASK_RUNNING);

                if (current->state == TASK_RUNNING)
                        return true;
                if (ret < 0)
                        break;
                cpu_relax();
        }

        return false;
}

bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_plug *plug;
        struct request *rq;

        if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
            !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
                return false;

        plug = current->plug;
        if (plug)
                blk_flush_plug_list(plug, false);

        hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
        if (!blk_qc_t_is_internal(cookie))
                rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
        else
                rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));

        return __blk_mq_poll(hctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_poll);
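
/*
 * Caller-side sketch (hedged; modeled loosely on the O_DIRECT completion
 * wait, with "done" standing in for the caller's own completion flag):
 *
 *      blk_qc_t qc = submit_bio(bio);
 *
 *      while (!READ_ONCE(done)) {
 *              if (!blk_mq_poll(bdev_get_queue(bdev), qc))
 *                      io_schedule();
 *      }
 *
 * A "true" return does not mean the IO completed; per the comment in
 * __blk_mq_poll(), the caller must always re-check its completion state.
 */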

void blk_mq_disable_hotplug(void)
{
        mutex_lock(&all_q_mutex);
}

void blk_mq_enable_hotplug(void)
{
        mutex_unlock(&all_q_mutex);
}

static int __init blk_mq_init(void)
{
        cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
                                blk_mq_hctx_notify_dead);

        cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
                                  blk_mq_queue_reinit_prepare,
                                  blk_mq_queue_reinit_dead);
        return 0;
}
subsys_initcall(blk_mq_init);