/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

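/*
 * Poll stats are bucketed by data direction and request size: even bucket
 * indices are reads, odd ones are writes, and each read/write pair covers
 * the next power-of-two size class starting at 512 bytes.
 */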
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, bytes, bucket;

	ddir = rq_data_dir(rq);
	bytes = blk_rq_bytes(rq);

	bucket = ddir + 2*(ilog2(bytes) - 9);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
			blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

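/*
 * Helper state for blk_mq_in_flight(): counts started, not yet completed
 * requests, with index 0 covering the given partition and index 1 the
 * whole device.
 */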
struct mq_inflight {
	struct hd_struct *part;
	unsigned int *inflight;
};

static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) &&
	    !test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
		/*
		 * index[0] counts the specific partition that was asked
		 * for. index[1] counts the ones that are active on the
		 * whole device, so increment that if mi->part is indeed
		 * a partition, and not a whole device.
		 */
		if (rq->part == mi->part)
			mi->inflight[0]++;
		if (mi->part->partno)
			mi->inflight[1]++;
	}
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
}

void blk_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		if (q->mq_ops)
			blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
					percpu_ref_is_zero(&q->q_usage_counter),
					timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero.  For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function returns, we make
 * sure no dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->queue_rq_srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function recovers the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* dispatch requests which are inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

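/*
 * Set up a request from the preallocated (static) pool for the given tag,
 * recording the tag either as a driver tag or, when a scheduler is
 * attached, as an internal tag.
 */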
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, unsigned int op)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];

	rq->rq_flags = 0;

	if (data->flags & BLK_MQ_REQ_INTERNAL) {
		rq->tag = -1;
		rq->internal_tag = tag;
	} else {
		if (blk_mq_tag_busy(data->hctx)) {
			rq->rq_flags = RQF_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}
		rq->tag = tag;
		rq->internal_tag = -1;
		data->hctx->tags->rqs[rq->tag] = rq;
	}

	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->cmd_flags = op;
	if (data->flags & BLK_MQ_REQ_PREEMPT)
		rq->rq_flags |= RQF_PREEMPT;
	if (blk_queue_io_stat(data->q))
		rq->rq_flags |= RQF_IO_STAT;
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->extra_len = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	data->ctx->rq_dispatched[op_is_sync(op)]++;
	return rq;
}

static struct request *blk_mq_get_request(struct request_queue *q,
		struct bio *bio, unsigned int op,
		struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;
	unsigned int tag;
	bool put_ctx_on_error = false;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx)) {
		data->ctx = blk_mq_get_ctx(q);
		put_ctx_on_error = true;
	}
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
	if (op & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list.
		 */
		if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
			e->type->ops.mq.limit_depth(op, data);
	}

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		if (put_ctx_on_error) {
			blk_mq_put_ctx(data->ctx);
			data->ctx = NULL;
		}
		blk_queue_exit(q);
		return NULL;
	}

	rq = blk_mq_rq_ctx_init(data, tag, op);
	if (!op_is_flush(op)) {
		rq->elv.icq = NULL;
		if (e && e->type->ops.mq.prepare_request) {
			if (e->type->icq_cache && rq_ioc(bio))
				blk_mq_sched_assign_ioc(rq, bio);

			e->type->ops.mq.prepare_request(rq, bio);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}
	data->hctx->queued++;
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	blk_mq_put_ctx(alloc_data.ctx);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	unsigned int cpu;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
		blk_queue_exit(q);
		return ERR_PTR(-EXDEV);
	}
	cpu = cpumask_first(alloc_data.hctx->cpumask);
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	const int sched_tag = rq->internal_tag;

	if (rq->rq_flags & RQF_ELVPRIV) {
		if (e && e->type->ops.mq.finish_request)
			e->type->ops.mq.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	ctx->rq_completed[rq_is_sync(rq)]++;
	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->backing_dev_info);

	wbt_done(q->rq_wb, &rq->issue_stat);

	if (blk_rq_rl(rq))
		blk_put_rl(blk_rq_rl(rq));

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
	if (rq->tag != -1)
		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		wbt_done(rq->q->rq_wb, &rq->issue_stat);
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (rq->internal_tag != -1)
		blk_mq_sched_completed_request(rq);
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq);
	}

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq))
		__blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
		rq->rq_flags |= RQF_STATS;
		wbt_issue(q->rq_wb, &rq->issue_stat);
	}

	blk_add_timer(rq);

	WARN_ON_ONCE(test_bit(REQ_ATOM_STARTED, &rq->atomic_flags));

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 *
	 * Ensure that ->deadline is visible before we set STARTED, such that
	 * blk_mq_check_expired() is guaranteed to observe our ->deadline when
	 * it observes STARTED.
	 */
	smp_wmb();
	set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
		/*
		 * Coherence order guarantees these consecutive stores to a
		 * single variable propagate in the specified order. Thus the
		 * clear_bit() is ordered _after_ the set bit. See
		 * blk_mq_check_expired().
		 *
		 * (the bits must be part of the same byte for this to be
		 * true).
		 */
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
	}

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears.  We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

/*
 * When we reach here because the queue is busy, the REQ_ATOM_COMPLETE
 * flag isn't set yet, so there may be a race with the timeout handler.
 * But since rq->deadline has only just been set in .queue_rq() in this
 * situation, the race won't happen in practice, because rq->timeout
 * should be large enough to cover the window between
 * blk_mq_start_request() being called from .queue_rq() and
 * REQ_ATOM_STARTED being cleared here.
 */
static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, &rq->issue_stat);
	blk_mq_sched_requeue_request(rq);

	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & RQF_SOFTBARRIER))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, true, false, false, true);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false, true);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_delayed_work(&q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	const struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;
	unsigned long deadline;

	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		return;

	/*
	 * Ensures that if we see STARTED we must also see our
	 * up-to-date deadline, see blk_mq_start_request().
	 */
	smp_rmb();

	deadline = READ_ONCE(rq->deadline);

	/*
	 * The rq being checked may have been freed and reallocated
	 * out already here, we avoid this race by checking rq->deadline
	 * and REQ_ATOM_COMPLETE flag together:
	 *
	 * - if rq->deadline is observed as new value because of
	 *   reusing, the rq won't be timed out because of timing.
	 * - if rq->deadline is observed as previous value,
	 *   REQ_ATOM_COMPLETE flag won't be cleared in reuse path
	 *   because we put a barrier between setting rq->deadline
	 *   and clearing the flag in blk_mq_start_request(), so
	 *   this rq won't be timed out too.
	 */
	if (time_after_eq(jiffies, deadline)) {
		if (!blk_mark_rq_complete(rq)) {
			/*
			 * Again coherence order ensures that consecutive reads
			 * from the same variable must be in that order. This
			 * ensures that if we see COMPLETE clear, we must then
			 * see STARTED set and we'll ignore this timeout.
			 *
			 * (There's also the MB implied by the test_and_clear())
			 */
			blk_mq_rq_timed_out(rq, reserved);
		}
	} else if (!data->next_set || time_after(data->next, deadline)) {
		data->next = deadline;
		data->next_set = 1;
	}
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		struct blk_mq_hw_ctx *hctx;

		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	sbitmap_clear_bit(sb, bitnr);
	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * onto the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	spin_lock(&ctx->lock);
	if (unlikely(!list_empty(&ctx->rq_list))) {
		dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_list))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

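/* Map a dispatch batch size to a slot in the hctx->dispatched[] histogram. */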
static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

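/*
 * Assign a driver tag to a request that so far only carries a scheduler
 * (internal) tag. Returns true if the request now has a driver tag; the
 * hardware queue it maps to is returned through @hctx when non-NULL.
 */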
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait)
{
	struct blk_mq_alloc_data data = {
		.q = rq->q,
		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
	};

	might_sleep_if(wait);

	if (rq->tag != -1)
		goto done;

	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
		data.flags |= BLK_MQ_REQ_RESERVED;

	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag >= 0) {
		if (blk_mq_tag_busy(data.hctx)) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&data.hctx->nr_active);
		}
		data.hctx->tags->rqs[rq->tag] = rq;
	}

done:
	if (hctx)
		*hctx = data.hctx;
	return rq->tag != -1;
}

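/* Wait queue callback: a tag was freed, so rerun the hardware queue waiting for one. */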
static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	list_del_init(&wait->entry);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
				 struct request *rq)
{
	struct blk_mq_hw_ctx *this_hctx = *hctx;
	bool shared_tags = (this_hctx->flags & BLK_MQ_F_TAG_SHARED) != 0;
	struct sbq_wait_state *ws;
	wait_queue_entry_t *wait;
	bool ret;

	if (!shared_tags) {
		if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
			set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);
	} else {
		wait = &this_hctx->dispatch_wait;
		if (!list_empty_careful(&wait->entry))
			return false;

		spin_lock(&this_hctx->lock);
		if (!list_empty(&wait->entry)) {
			spin_unlock(&this_hctx->lock);
			return false;
		}

		ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
		add_wait_queue(&ws->wait, wait);
	}

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq, hctx, false);

	if (!shared_tags) {
		/*
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return ret;
	} else {
		if (!ret) {
			spin_unlock(&this_hctx->lock);
			return false;
		}

		/*
		 * We got a tag, remove ourselves from the wait queue to ensure
		 * someone else gets the wakeup.
		 */
		spin_lock_irq(&ws->wait.lock);
		list_del_init(&wait->entry);
		spin_unlock_irq(&ws->wait.lock);
		spin_unlock(&this_hctx->lock);
		return true;
	}
}

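/*
 * Dispatch the requests on @list to the driver. Returns true if at least
 * one request was handed to ->queue_rq() or failed; requests the driver
 * could not accept are put back on hctx->dispatch.
 */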
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			     bool got_budget)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq, *nxt;
	bool no_tag = false;
	int errors, queued;

	if (list_empty(list))
		return false;

	WARN_ON(!list_is_singular(list) && got_budget);

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;
		blk_status_t ret;

		rq = list_first_entry(list, struct request, queuelist);
		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed. The
			 * waitqueue takes care of that. If the queue is run
			 * before we add this entry back on the dispatch list,
			 * we'll re-run it below.
			 */
			if (!blk_mq_mark_tag_wait(&hctx, rq)) {
				if (got_budget)
					blk_mq_put_dispatch_budget(hctx);
				/*
				 * For non-shared tags, the RESTART check
				 * will suffice.
				 */
				if (hctx->flags & BLK_MQ_F_TAG_SHARED)
					no_tag = true;
				break;
			}
		}

		if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
			blk_mq_put_driver_tag(rq);
			break;
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		if (ret == BLK_STS_RESOURCE) {
			/*
			 * If an I/O scheduler has been configured and we got a
			 * driver tag for the next request already, free it again.
			 */
			if (!list_empty(list)) {
				nxt = list_first_entry(list, struct request, queuelist);
				blk_mq_put_driver_tag(nxt);
			}
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		}

		if (unlikely(ret != BLK_STS_OK)) {
			errors++;
			blk_mq_end_request(rq, BLK_STS_IOERR);
			continue;
		}

		queued++;
	} while (!list_empty(list));

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 */
		if (!blk_mq_sched_needs_restart(hctx) ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
	}

	return (queued + errors) != 0;
}

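/*
 * Run the dispatch loop for one hardware queue, under RCU or SRCU
 * protection depending on whether ->queue_rq() may block (BLK_MQ_F_BLOCKING).
 */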
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	/*
	 * We should be running this queue from one of the CPUs that
	 * are mapped to it.
	 */
	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu));

	/*
	 * We can't run the queue inline with ints disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		rcu_read_lock();
		blk_mq_sched_dispatch_requests(hctx);
		rcu_read_unlock();
	} else {
		might_sleep();

		srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
		blk_mq_sched_dispatch_requests(hctx);
		srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
	}
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
		int next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	return hctx->next_cpu;
}

static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
		return;

	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
					 &hctx->run_work,
					 msecs_to_jiffies(msecs));
}

void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1280{
1281 __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1282}
1283EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1284
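/**
 * blk_mq_run_hw_queue - run a hardware queue if it has pending work
 * @hctx: hardware queue to run
 * @async: if true, defer the run to kblockd instead of attempting to run
 *	inline on the current CPU
 *
 * Returns true if the queue had pending work and was run (or scheduled to
 * run), false if there was nothing to do.
 */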
Jens Axboe79f720a2017-11-10 09:13:21 -07001285bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
Bart Van Assche7587a5a2017-04-07 11:16:52 -07001286{
Jens Axboe79f720a2017-11-10 09:13:21 -07001287 if (blk_mq_hctx_has_pending(hctx)) {
1288 __blk_mq_delay_run_hw_queue(hctx, async, 0);
1289 return true;
1290 }
1291
1292 return false;
Jens Axboe320ae512013-10-24 09:20:05 +01001293}
Omar Sandoval5b727272017-04-14 01:00:00 -07001294EXPORT_SYMBOL(blk_mq_run_hw_queue);
Jens Axboe320ae512013-10-24 09:20:05 +01001295
Mike Snitzerb94ec292015-03-11 23:56:38 -04001296void blk_mq_run_hw_queues(struct request_queue *q, bool async)
Jens Axboe320ae512013-10-24 09:20:05 +01001297{
1298 struct blk_mq_hw_ctx *hctx;
1299 int i;
1300
1301 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboe79f720a2017-11-10 09:13:21 -07001302 if (blk_mq_hctx_stopped(hctx))
Jens Axboe320ae512013-10-24 09:20:05 +01001303 continue;
1304
Mike Snitzerb94ec292015-03-11 23:56:38 -04001305 blk_mq_run_hw_queue(hctx, async);
Jens Axboe320ae512013-10-24 09:20:05 +01001306 }
1307}
Mike Snitzerb94ec292015-03-11 23:56:38 -04001308EXPORT_SYMBOL(blk_mq_run_hw_queues);
Jens Axboe320ae512013-10-24 09:20:05 +01001309
Bart Van Asschefd001442016-10-28 17:19:37 -07001310/**
1311 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1312 * @q: request queue.
1313 *
1314 * The caller is responsible for serializing this function against
1315 * blk_mq_{start,stop}_hw_queue().
1316 */
1317bool blk_mq_queue_stopped(struct request_queue *q)
1318{
1319 struct blk_mq_hw_ctx *hctx;
1320 int i;
1321
1322 queue_for_each_hw_ctx(q, hctx, i)
1323 if (blk_mq_hctx_stopped(hctx))
1324 return true;
1325
1326 return false;
1327}
1328EXPORT_SYMBOL(blk_mq_queue_stopped);
1329
Ming Lei39a70c72017-06-06 23:22:09 +08001330/*
1331 * This function is often used by a driver to pause .queue_rq() when
1332 * there aren't enough resources or some condition isn't satisfied, and
Bart Van Assche4d606212017-08-17 16:23:00 -07001333 * BLK_STS_RESOURCE is usually returned.
Ming Lei39a70c72017-06-06 23:22:09 +08001334 *
1335 * We do not guarantee that dispatch can be drained or blocked
1336 * after blk_mq_stop_hw_queue() returns. Please use
1337 * blk_mq_quiesce_queue() for that requirement.
1338 */
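/*
 * Typical (hypothetical) use from a driver's ->queue_rq() handler when a
 * device resource runs out; driver_has_resources() below is a made-up
 * helper, the real check is driver specific:
 *
 *	if (!driver_has_resources(hctx)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_STS_RESOURCE;
 *	}
 *
 * The queue is typically restarted later, e.g. from the driver's completion
 * or resource-free path, via blk_mq_start_stopped_hw_queues().
 */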
Jens Axboe320ae512013-10-24 09:20:05 +01001339void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1340{
Ming Lei641a9ed2017-06-06 23:22:10 +08001341 cancel_delayed_work(&hctx->run_work);
1342
1343 set_bit(BLK_MQ_S_STOPPED, &hctx->state);
Jens Axboe320ae512013-10-24 09:20:05 +01001344}
1345EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1346
Ming Lei39a70c72017-06-06 23:22:09 +08001347/*
1348 * This function is often used by a driver to pause .queue_rq() when
1349 * there aren't enough resources or some condition isn't satisfied, and
Bart Van Assche4d606212017-08-17 16:23:00 -07001350 * BLK_STS_RESOURCE is usually returned.
Ming Lei39a70c72017-06-06 23:22:09 +08001351 *
1352 * We do not guarantee that dispatch can be drained or blocked
1353 * after blk_mq_stop_hw_queues() returns. Please use
1354 * blk_mq_quiesce_queue() for that requirement.
1355 */
Jens Axboe2719aa22017-05-03 11:08:14 -06001356void blk_mq_stop_hw_queues(struct request_queue *q)
1357{
Ming Lei641a9ed2017-06-06 23:22:10 +08001358 struct blk_mq_hw_ctx *hctx;
1359 int i;
1360
1361 queue_for_each_hw_ctx(q, hctx, i)
1362 blk_mq_stop_hw_queue(hctx);
Christoph Hellwig280d45f2013-10-25 14:45:58 +01001363}
1364EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1365
Jens Axboe320ae512013-10-24 09:20:05 +01001366void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1367{
1368 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
Jens Axboee4043dc2014-04-09 10:18:23 -06001369
Jens Axboe0ffbce82014-06-25 08:22:34 -06001370 blk_mq_run_hw_queue(hctx, false);
Jens Axboe320ae512013-10-24 09:20:05 +01001371}
1372EXPORT_SYMBOL(blk_mq_start_hw_queue);
1373
Christoph Hellwig2f268552014-04-16 09:44:56 +02001374void blk_mq_start_hw_queues(struct request_queue *q)
1375{
1376 struct blk_mq_hw_ctx *hctx;
1377 int i;
1378
1379 queue_for_each_hw_ctx(q, hctx, i)
1380 blk_mq_start_hw_queue(hctx);
1381}
1382EXPORT_SYMBOL(blk_mq_start_hw_queues);
1383
Jens Axboeae911c52016-12-08 13:19:30 -07001384void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1385{
1386 if (!blk_mq_hctx_stopped(hctx))
1387 return;
1388
1389 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1390 blk_mq_run_hw_queue(hctx, async);
1391}
1392EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1393
Christoph Hellwig1b4a3252014-04-16 09:44:54 +02001394void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
Jens Axboe320ae512013-10-24 09:20:05 +01001395{
1396 struct blk_mq_hw_ctx *hctx;
1397 int i;
1398
Jens Axboeae911c52016-12-08 13:19:30 -07001399 queue_for_each_hw_ctx(q, hctx, i)
1400 blk_mq_start_stopped_hw_queue(hctx, async);
Jens Axboe320ae512013-10-24 09:20:05 +01001401}
1402EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1403
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001404static void blk_mq_run_work_fn(struct work_struct *work)
Jens Axboe320ae512013-10-24 09:20:05 +01001405{
1406 struct blk_mq_hw_ctx *hctx;
1407
Jens Axboe9f993732017-04-10 09:54:54 -06001408 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
Jens Axboe21c6e932017-04-10 09:54:56 -06001409
1410 /*
1411 * If we are stopped, don't run the queue. The exception is if
1412 * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
1413 * the STOPPED bit and run it.
1414 */
1415 if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
1416 if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
1417 return;
1418
1419 clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1420 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1421 }
Jens Axboee4043dc2014-04-09 10:18:23 -06001422
Jens Axboe320ae512013-10-24 09:20:05 +01001423 __blk_mq_run_hw_queue(hctx);
1424}
1425
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001426
1427void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1428{
Bart Van Assche5435c022017-06-20 11:15:49 -07001429 if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
Ming Lei19c66e52014-12-03 19:38:04 +08001430 return;
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001431
Jens Axboe21c6e932017-04-10 09:54:56 -06001432 /*
1433 * Stop the hw queue, then modify currently delayed work.
1434 * This should prevent us from running the queue prematurely.
1435 * Mark the queue as auto-clearing STOPPED when it runs.
1436 */
Jens Axboe7e79dad2017-01-19 07:58:59 -07001437 blk_mq_stop_hw_queue(hctx);
Jens Axboe21c6e932017-04-10 09:54:56 -06001438 set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1439 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1440 &hctx->run_work,
1441 msecs_to_jiffies(msecs));
Christoph Hellwig70f4db62014-04-16 10:48:08 -06001442}
1443EXPORT_SYMBOL(blk_mq_delay_queue);
1444
Ming Leicfd0c552015-10-20 23:13:57 +08001445static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
Ming Leicfd0c552015-10-20 23:13:57 +08001446 struct request *rq,
1447 bool at_head)
Jens Axboe320ae512013-10-24 09:20:05 +01001448{
Jens Axboee57690f2016-08-24 15:34:35 -06001449 struct blk_mq_ctx *ctx = rq->mq_ctx;
1450
Bart Van Assche7b607812017-06-20 11:15:47 -07001451 lockdep_assert_held(&ctx->lock);
1452
Jens Axboe01b983c2013-11-19 18:59:10 -07001453 trace_block_rq_insert(hctx->queue, rq);
1454
Christoph Hellwig72a0a362014-02-07 10:22:36 -08001455 if (at_head)
1456 list_add(&rq->queuelist, &ctx->rq_list);
1457 else
1458 list_add_tail(&rq->queuelist, &ctx->rq_list);
Ming Leicfd0c552015-10-20 23:13:57 +08001459}
Jens Axboe4bb659b2014-05-09 09:36:49 -06001460
Jens Axboe2c3ad662016-12-14 14:34:47 -07001461void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1462 bool at_head)
Ming Leicfd0c552015-10-20 23:13:57 +08001463{
1464 struct blk_mq_ctx *ctx = rq->mq_ctx;
1465
Bart Van Assche7b607812017-06-20 11:15:47 -07001466 lockdep_assert_held(&ctx->lock);
1467
Jens Axboee57690f2016-08-24 15:34:35 -06001468 __blk_mq_insert_req_list(hctx, rq, at_head);
Jens Axboe320ae512013-10-24 09:20:05 +01001469 blk_mq_hctx_mark_pending(hctx, ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001470}
1471
Jens Axboe157f3772017-09-11 16:43:57 -06001472/*
1473 * Should only be used carefully, when the caller knows we want to
1474 * bypass a potential IO scheduler on the target device.
1475 */
Ming Leib0850292017-11-02 23:24:34 +08001476void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
Jens Axboe157f3772017-09-11 16:43:57 -06001477{
1478 struct blk_mq_ctx *ctx = rq->mq_ctx;
1479 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
1480
1481 spin_lock(&hctx->lock);
1482 list_add_tail(&rq->queuelist, &hctx->dispatch);
1483 spin_unlock(&hctx->lock);
1484
Ming Leib0850292017-11-02 23:24:34 +08001485 if (run_queue)
1486 blk_mq_run_hw_queue(hctx, false);
Jens Axboe157f3772017-09-11 16:43:57 -06001487}
1488
Jens Axboebd166ef2017-01-17 06:03:22 -07001489void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1490 struct list_head *list)
Jens Axboe320ae512013-10-24 09:20:05 +01001491
1492{
Jens Axboe320ae512013-10-24 09:20:05 +01001493 /*
1494 * preemption doesn't flush plug list, so it's possible ctx->cpu is
1495 * offline now
1496 */
1497 spin_lock(&ctx->lock);
1498 while (!list_empty(list)) {
1499 struct request *rq;
1500
1501 rq = list_first_entry(list, struct request, queuelist);
Jens Axboee57690f2016-08-24 15:34:35 -06001502 BUG_ON(rq->mq_ctx != ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001503 list_del_init(&rq->queuelist);
Jens Axboee57690f2016-08-24 15:34:35 -06001504 __blk_mq_insert_req_list(hctx, rq, false);
Jens Axboe320ae512013-10-24 09:20:05 +01001505 }
Ming Leicfd0c552015-10-20 23:13:57 +08001506 blk_mq_hctx_mark_pending(hctx, ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01001507 spin_unlock(&ctx->lock);
Jens Axboe320ae512013-10-24 09:20:05 +01001508}
1509
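/*
 * Comparator for list_sort() in blk_mq_flush_plug_list(): order plugged
 * requests by software queue (mq_ctx) first and by sector within a ctx,
 * so that requests belonging to the same ctx can be handed to the
 * scheduler as one batch.
 */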
1510static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1511{
1512 struct request *rqa = container_of(a, struct request, queuelist);
1513 struct request *rqb = container_of(b, struct request, queuelist);
1514
1515 return !(rqa->mq_ctx < rqb->mq_ctx ||
1516 (rqa->mq_ctx == rqb->mq_ctx &&
1517 blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1518}
1519
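/*
 * Flush the requests queued up on the plug list: sort them so requests
 * sharing a software queue are adjacent, then insert each per-ctx batch
 * through the I/O scheduler.
 */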
1520void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1521{
1522 struct blk_mq_ctx *this_ctx;
1523 struct request_queue *this_q;
1524 struct request *rq;
1525 LIST_HEAD(list);
1526 LIST_HEAD(ctx_list);
1527 unsigned int depth;
1528
1529 list_splice_init(&plug->mq_list, &list);
1530
1531 list_sort(NULL, &list, plug_ctx_cmp);
1532
1533 this_q = NULL;
1534 this_ctx = NULL;
1535 depth = 0;
1536
1537 while (!list_empty(&list)) {
1538 rq = list_entry_rq(list.next);
1539 list_del_init(&rq->queuelist);
1540 BUG_ON(!rq->q);
1541 if (rq->mq_ctx != this_ctx) {
1542 if (this_ctx) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001543 trace_block_unplug(this_q, depth, from_schedule);
1544 blk_mq_sched_insert_requests(this_q, this_ctx,
1545 &ctx_list,
1546 from_schedule);
Jens Axboe320ae512013-10-24 09:20:05 +01001547 }
1548
1549 this_ctx = rq->mq_ctx;
1550 this_q = rq->q;
1551 depth = 0;
1552 }
1553
1554 depth++;
1555 list_add_tail(&rq->queuelist, &ctx_list);
1556 }
1557
1558 /*
1559 * If 'this_ctx' is set, we know we have entries to complete
1560 * on 'ctx_list'. Do those.
1561 */
1562 if (this_ctx) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001563 trace_block_unplug(this_q, depth, from_schedule);
1564 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1565 from_schedule);
Jens Axboe320ae512013-10-24 09:20:05 +01001566 }
1567}
1568
1569static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1570{
Bart Van Asscheda8d7f02017-04-19 14:01:24 -07001571 blk_init_request_from_bio(rq, bio);
Jens Axboe4b570522014-05-29 11:00:11 -06001572
Shaohua Li85acb3b2017-10-06 17:56:00 -07001573 blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));
1574
Jens Axboe6e85eaf2016-12-02 20:00:14 -07001575 blk_account_io_start(rq, true);
Jens Axboe320ae512013-10-24 09:20:05 +01001576}
1577
Ming Leiab42f352017-05-26 19:53:19 +08001578static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
1579 struct blk_mq_ctx *ctx,
1580 struct request *rq)
1581{
1582 spin_lock(&ctx->lock);
1583 __blk_mq_insert_request(hctx, rq, false);
1584 spin_unlock(&ctx->lock);
Jens Axboe07068d52014-05-22 10:40:51 -06001585}
1586
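/*
 * Build the polling cookie for a request: it encodes the hardware queue
 * number plus either the driver tag or, if no driver tag has been assigned
 * yet, the scheduler's internal tag.
 */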
Jens Axboefd2d3322017-01-12 10:04:45 -07001587static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1588{
Jens Axboebd166ef2017-01-17 06:03:22 -07001589 if (rq->tag != -1)
1590 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1591
1592 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
Jens Axboefd2d3322017-01-12 10:04:45 -07001593}
1594
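/*
 * Try to issue a request directly to the driver, bypassing the software
 * queues. This is only done if no I/O scheduler is attached, the queue is
 * neither stopped nor quiesced, and a driver tag plus dispatch budget can
 * be obtained; otherwise the request is inserted through the normal path.
 */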
Ming Leid964f042017-06-06 23:22:00 +08001595static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1596 struct request *rq,
1597 blk_qc_t *cookie, bool may_sleep)
Shaohua Lif984df12015-05-08 10:51:32 -07001598{
Shaohua Lif984df12015-05-08 10:51:32 -07001599 struct request_queue *q = rq->q;
Shaohua Lif984df12015-05-08 10:51:32 -07001600 struct blk_mq_queue_data bd = {
1601 .rq = rq,
Omar Sandovald945a362017-04-05 12:01:36 -07001602 .last = true,
Shaohua Lif984df12015-05-08 10:51:32 -07001603 };
Jens Axboebd166ef2017-01-17 06:03:22 -07001604 blk_qc_t new_cookie;
Jens Axboef06345a2017-06-12 11:22:46 -06001605 blk_status_t ret;
Ming Leid964f042017-06-06 23:22:00 +08001606 bool run_queue = true;
1607
Ming Leif4560ff2017-06-18 14:24:27 -06001608 /* RCU or SRCU read lock is needed before checking quiesced flag */
1609 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
Ming Leid964f042017-06-06 23:22:00 +08001610 run_queue = false;
1611 goto insert;
1612 }
Shaohua Lif984df12015-05-08 10:51:32 -07001613
Jens Axboebd166ef2017-01-17 06:03:22 -07001614 if (q->elevator)
Bart Van Assche2253efc2016-10-28 17:20:02 -07001615 goto insert;
1616
Ming Leid964f042017-06-06 23:22:00 +08001617 if (!blk_mq_get_driver_tag(rq, NULL, false))
Jens Axboebd166ef2017-01-17 06:03:22 -07001618 goto insert;
1619
Ming Lei88022d72017-11-05 02:21:12 +08001620 if (!blk_mq_get_dispatch_budget(hctx)) {
Ming Leide148292017-10-14 17:22:29 +08001621 blk_mq_put_driver_tag(rq);
1622 goto insert;
Ming Lei88022d72017-11-05 02:21:12 +08001623 }
Ming Leide148292017-10-14 17:22:29 +08001624
Jens Axboebd166ef2017-01-17 06:03:22 -07001625 new_cookie = request_to_qc_t(hctx, rq);
1626
Shaohua Lif984df12015-05-08 10:51:32 -07001627 /*
1628	 * If we get BLK_STS_OK the request has been queued and we are done.
1629	 * On a hard error we end the request. Anything else (e.g. busy) means
1630	 * we insert the request as we previously would have done.
1631 */
1632 ret = q->mq_ops->queue_rq(hctx, &bd);
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001633 switch (ret) {
1634 case BLK_STS_OK:
Jens Axboe7b371632015-11-05 10:41:40 -07001635 *cookie = new_cookie;
Bart Van Assche2253efc2016-10-28 17:20:02 -07001636 return;
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001637 case BLK_STS_RESOURCE:
1638 __blk_mq_requeue_request(rq);
1639 goto insert;
1640 default:
Jens Axboe7b371632015-11-05 10:41:40 -07001641 *cookie = BLK_QC_T_NONE;
Christoph Hellwigfc17b652017-06-03 09:38:05 +02001642 blk_mq_end_request(rq, ret);
Bart Van Assche2253efc2016-10-28 17:20:02 -07001643 return;
Jens Axboe7b371632015-11-05 10:41:40 -07001644 }
1645
Bart Van Assche2253efc2016-10-28 17:20:02 -07001646insert:
Ming Leid964f042017-06-06 23:22:00 +08001647 blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
Shaohua Lif984df12015-05-08 10:51:32 -07001648}
1649
Christoph Hellwig5eb61262017-03-22 15:01:51 -04001650static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1651 struct request *rq, blk_qc_t *cookie)
1652{
1653 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1654 rcu_read_lock();
Ming Leid964f042017-06-06 23:22:00 +08001655 __blk_mq_try_issue_directly(hctx, rq, cookie, false);
Christoph Hellwig5eb61262017-03-22 15:01:51 -04001656 rcu_read_unlock();
1657 } else {
Jens Axboebf4907c2017-03-30 12:30:39 -06001658 unsigned int srcu_idx;
1659
1660 might_sleep();
1661
Bart Van Assche07319672017-06-20 11:15:38 -07001662 srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
Ming Leid964f042017-06-06 23:22:00 +08001663 __blk_mq_try_issue_directly(hctx, rq, cookie, true);
Bart Van Assche07319672017-06-20 11:15:38 -07001664 srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
Christoph Hellwig5eb61262017-03-22 15:01:51 -04001665 }
1666}
1667
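/*
 * Entry point for all submissions to a blk-mq queue. After bounce, split
 * and merge handling plus request allocation, the request is routed
 * depending on the situation: flush/FUA requests bypass the scheduler,
 * plugged requests are batched on the plug list, sync requests on
 * multi-queue devices are issued directly, and everything else goes
 * through the I/O scheduler or straight to a software queue.
 */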
Jens Axboedece1632015-11-05 10:41:16 -07001668static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
Jens Axboe07068d52014-05-22 10:40:51 -06001669{
Christoph Hellwigef295ec2016-10-28 08:48:16 -06001670 const int is_sync = op_is_sync(bio->bi_opf);
Christoph Hellwigf73f44e2017-01-27 08:30:47 -07001671 const int is_flush_fua = op_is_flush(bio->bi_opf);
Jens Axboe5a797e02017-01-26 12:22:11 -07001672 struct blk_mq_alloc_data data = { .flags = 0 };
Jens Axboe07068d52014-05-22 10:40:51 -06001673 struct request *rq;
Christoph Hellwig5eb61262017-03-22 15:01:51 -04001674 unsigned int request_count = 0;
Shaohua Lif984df12015-05-08 10:51:32 -07001675 struct blk_plug *plug;
Shaohua Li5b3f3412015-05-08 10:51:33 -07001676 struct request *same_queue_rq = NULL;
Jens Axboe7b371632015-11-05 10:41:40 -07001677 blk_qc_t cookie;
Jens Axboe87760e52016-11-09 12:38:14 -07001678 unsigned int wb_acct;
Jens Axboe07068d52014-05-22 10:40:51 -06001679
1680 blk_queue_bounce(q, &bio);
1681
NeilBrownaf67c312017-06-18 14:38:57 +10001682 blk_queue_split(q, &bio);
Wen Xiongf36ea502017-05-10 08:54:11 -05001683
Dmitry Monakhove23947b2017-06-29 11:31:11 -07001684 if (!bio_integrity_prep(bio))
Jens Axboedece1632015-11-05 10:41:16 -07001685 return BLK_QC_T_NONE;
Jens Axboe07068d52014-05-22 10:40:51 -06001686
Omar Sandoval87c279e2016-06-01 22:18:48 -07001687 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1688 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1689 return BLK_QC_T_NONE;
Shaohua Lif984df12015-05-08 10:51:32 -07001690
Jens Axboebd166ef2017-01-17 06:03:22 -07001691 if (blk_mq_sched_bio_merge(q, bio))
1692 return BLK_QC_T_NONE;
1693
Jens Axboe87760e52016-11-09 12:38:14 -07001694 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1695
Jens Axboebd166ef2017-01-17 06:03:22 -07001696 trace_block_getrq(q, bio, bio->bi_opf);
1697
Christoph Hellwigd2c0d382017-06-16 18:15:19 +02001698 rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
Jens Axboe87760e52016-11-09 12:38:14 -07001699 if (unlikely(!rq)) {
1700 __wbt_done(q->rq_wb, wb_acct);
Goldwyn Rodrigues03a07c92017-06-20 07:05:46 -05001701 if (bio->bi_opf & REQ_NOWAIT)
1702 bio_wouldblock_error(bio);
Jens Axboedece1632015-11-05 10:41:16 -07001703 return BLK_QC_T_NONE;
Jens Axboe87760e52016-11-09 12:38:14 -07001704 }
1705
1706 wbt_track(&rq->issue_stat, wb_acct);
Jens Axboe07068d52014-05-22 10:40:51 -06001707
Jens Axboefd2d3322017-01-12 10:04:45 -07001708 cookie = request_to_qc_t(data.hctx, rq);
Jens Axboe07068d52014-05-22 10:40:51 -06001709
Shaohua Lif984df12015-05-08 10:51:32 -07001710 plug = current->plug;
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001711 if (unlikely(is_flush_fua)) {
Shaohua Lif984df12015-05-08 10:51:32 -07001712 blk_mq_put_ctx(data.ctx);
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001713 blk_mq_bio_to_request(rq, bio);
Ming Lei923218f2017-11-02 23:24:38 +08001714
1715 /* bypass scheduler for flush rq */
1716 blk_insert_flush(rq);
1717 blk_mq_run_hw_queue(data.hctx, true);
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001718 } else if (plug && q->nr_hw_queues == 1) {
Shaohua Li600271d2016-11-03 17:03:54 -07001719 struct request *last = NULL;
1720
Jens Axboeb00c53e2017-04-20 16:40:36 -06001721 blk_mq_put_ctx(data.ctx);
Jeff Moyere6c44382015-05-08 10:51:30 -07001722 blk_mq_bio_to_request(rq, bio);
Ming Lei0a6219a2016-11-16 18:07:05 +08001723
1724 /*
1725 * @request_count may become stale because of schedule
1726 * out, so check the list again.
1727 */
1728 if (list_empty(&plug->mq_list))
1729 request_count = 0;
Christoph Hellwig254d2592017-03-22 15:01:50 -04001730 else if (blk_queue_nomerges(q))
1731 request_count = blk_plug_queued_count(q);
1732
Ming Lei676d0602015-10-20 23:13:56 +08001733 if (!request_count)
Jeff Moyere6c44382015-05-08 10:51:30 -07001734 trace_block_plug(q);
Shaohua Li600271d2016-11-03 17:03:54 -07001735 else
1736 last = list_entry_rq(plug->mq_list.prev);
Jens Axboeb094f892015-11-20 20:29:45 -07001737
Shaohua Li600271d2016-11-03 17:03:54 -07001738 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1739 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
Jeff Moyere6c44382015-05-08 10:51:30 -07001740 blk_flush_plug_list(plug, false);
1741 trace_block_plug(q);
Jens Axboe320ae512013-10-24 09:20:05 +01001742 }
Jens Axboeb094f892015-11-20 20:29:45 -07001743
Jeff Moyere6c44382015-05-08 10:51:30 -07001744 list_add_tail(&rq->queuelist, &plug->mq_list);
Christoph Hellwig22997222017-03-22 15:01:52 -04001745 } else if (plug && !blk_queue_nomerges(q)) {
Jens Axboe320ae512013-10-24 09:20:05 +01001746 blk_mq_bio_to_request(rq, bio);
Jens Axboe320ae512013-10-24 09:20:05 +01001747
Jens Axboe320ae512013-10-24 09:20:05 +01001748 /*
1749 * We do limited plugging. If the bio can be merged, do that.
1750 * Otherwise the existing request in the plug list will be
1751		 * issued. So the plug list will have one request at most.
Christoph Hellwig22997222017-03-22 15:01:52 -04001752 * The plug list might get flushed before this. If that happens,
1753 * the plug list is empty, and same_queue_rq is invalid.
Jens Axboe320ae512013-10-24 09:20:05 +01001754 */
Christoph Hellwig22997222017-03-22 15:01:52 -04001755 if (list_empty(&plug->mq_list))
1756 same_queue_rq = NULL;
1757 if (same_queue_rq)
1758 list_del_init(&same_queue_rq->queuelist);
1759 list_add_tail(&rq->queuelist, &plug->mq_list);
1760
Jens Axboebf4907c2017-03-30 12:30:39 -06001761 blk_mq_put_ctx(data.ctx);
1762
Ming Leidad7a3b2017-06-06 23:21:59 +08001763 if (same_queue_rq) {
1764 data.hctx = blk_mq_map_queue(q,
1765 same_queue_rq->mq_ctx->cpu);
Christoph Hellwig22997222017-03-22 15:01:52 -04001766 blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1767 &cookie);
Ming Leidad7a3b2017-06-06 23:21:59 +08001768 }
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001769 } else if (q->nr_hw_queues > 1 && is_sync) {
Jens Axboebd166ef2017-01-17 06:03:22 -07001770 blk_mq_put_ctx(data.ctx);
1771 blk_mq_bio_to_request(rq, bio);
Christoph Hellwig22997222017-03-22 15:01:52 -04001772 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001773 } else if (q->elevator) {
Jens Axboeb00c53e2017-04-20 16:40:36 -06001774 blk_mq_put_ctx(data.ctx);
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001775 blk_mq_bio_to_request(rq, bio);
1776 blk_mq_sched_insert_request(rq, false, true, true, true);
Ming Leiab42f352017-05-26 19:53:19 +08001777 } else {
Jens Axboeb00c53e2017-04-20 16:40:36 -06001778 blk_mq_put_ctx(data.ctx);
Ming Leiab42f352017-05-26 19:53:19 +08001779 blk_mq_bio_to_request(rq, bio);
1780 blk_mq_queue_io(data.hctx, data.ctx, rq);
Christoph Hellwiga4d907b2017-03-22 15:01:53 -04001781 blk_mq_run_hw_queue(data.hctx, true);
Ming Leiab42f352017-05-26 19:53:19 +08001782 }
Jens Axboe320ae512013-10-24 09:20:05 +01001783
Jens Axboe7b371632015-11-05 10:41:40 -07001784 return cookie;
Jens Axboe320ae512013-10-24 09:20:05 +01001785}
1786
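/*
 * Free all statically allocated requests in @tags: give the driver a
 * chance to tear down its per-request data via ->exit_request(), if
 * implemented, and then release the backing pages.
 */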
Jens Axboecc71a6f2017-01-11 14:29:56 -07001787void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1788 unsigned int hctx_idx)
Jens Axboe320ae512013-10-24 09:20:05 +01001789{
1790 struct page *page;
1791
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001792 if (tags->rqs && set->ops->exit_request) {
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001793 int i;
1794
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001795 for (i = 0; i < tags->nr_tags; i++) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001796 struct request *rq = tags->static_rqs[i];
1797
1798 if (!rq)
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001799 continue;
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001800 set->ops->exit_request(set, rq, hctx_idx);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001801 tags->static_rqs[i] = NULL;
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001802 }
1803 }
1804
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001805 while (!list_empty(&tags->page_list)) {
1806 page = list_first_entry(&tags->page_list, struct page, lru);
Dave Hansen67534712014-01-08 20:17:46 -07001807 list_del_init(&page->lru);
Catalin Marinasf75782e2015-09-14 18:16:02 +01001808 /*
1809 * Remove kmemleak object previously allocated in
1810 * blk_mq_init_rq_map().
1811 */
1812 kmemleak_free(page_address(page));
Jens Axboe320ae512013-10-24 09:20:05 +01001813 __free_pages(page, page->private);
1814 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07001815}
Jens Axboe320ae512013-10-24 09:20:05 +01001816
Jens Axboecc71a6f2017-01-11 14:29:56 -07001817void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1818{
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001819 kfree(tags->rqs);
Jens Axboecc71a6f2017-01-11 14:29:56 -07001820 tags->rqs = NULL;
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001821 kfree(tags->static_rqs);
1822 tags->static_rqs = NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01001823
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001824 blk_mq_free_tags(tags);
Jens Axboe320ae512013-10-24 09:20:05 +01001825}
1826
Jens Axboecc71a6f2017-01-11 14:29:56 -07001827struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1828 unsigned int hctx_idx,
1829 unsigned int nr_tags,
1830 unsigned int reserved_tags)
Jens Axboe320ae512013-10-24 09:20:05 +01001831{
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001832 struct blk_mq_tags *tags;
Shaohua Li59f082e2017-02-01 09:53:14 -08001833 int node;
Jens Axboe320ae512013-10-24 09:20:05 +01001834
Shaohua Li59f082e2017-02-01 09:53:14 -08001835 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1836 if (node == NUMA_NO_NODE)
1837 node = set->numa_node;
1838
1839 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
Shaohua Li24391c02015-01-23 14:18:00 -07001840 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001841 if (!tags)
1842 return NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01001843
Jens Axboecc71a6f2017-01-11 14:29:56 -07001844 tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001845 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
Shaohua Li59f082e2017-02-01 09:53:14 -08001846 node);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001847 if (!tags->rqs) {
1848 blk_mq_free_tags(tags);
1849 return NULL;
1850 }
Jens Axboe320ae512013-10-24 09:20:05 +01001851
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001852 tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1853 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
Shaohua Li59f082e2017-02-01 09:53:14 -08001854 node);
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001855 if (!tags->static_rqs) {
1856 kfree(tags->rqs);
1857 blk_mq_free_tags(tags);
1858 return NULL;
1859 }
1860
Jens Axboecc71a6f2017-01-11 14:29:56 -07001861 return tags;
1862}
1863
1864static size_t order_to_size(unsigned int order)
1865{
1866 return (size_t)PAGE_SIZE << order;
1867}
1868
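/*
 * Allocate @depth requests for one hardware queue. Requests are carved out
 * of higher-order pages (up to order 4) allocated on the queue's home node,
 * and the driver's ->init_request() is called for each one, if implemented.
 */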
1869int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1870 unsigned int hctx_idx, unsigned int depth)
1871{
1872 unsigned int i, j, entries_per_page, max_order = 4;
1873 size_t rq_size, left;
Shaohua Li59f082e2017-02-01 09:53:14 -08001874 int node;
1875
1876 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1877 if (node == NUMA_NO_NODE)
1878 node = set->numa_node;
Jens Axboecc71a6f2017-01-11 14:29:56 -07001879
1880 INIT_LIST_HEAD(&tags->page_list);
1881
Jens Axboe320ae512013-10-24 09:20:05 +01001882 /*
1883 * rq_size is the size of the request plus driver payload, rounded
1884 * to the cacheline size
1885 */
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001886 rq_size = round_up(sizeof(struct request) + set->cmd_size,
Jens Axboe320ae512013-10-24 09:20:05 +01001887 cache_line_size());
Jens Axboecc71a6f2017-01-11 14:29:56 -07001888 left = rq_size * depth;
Jens Axboe320ae512013-10-24 09:20:05 +01001889
Jens Axboecc71a6f2017-01-11 14:29:56 -07001890 for (i = 0; i < depth; ) {
Jens Axboe320ae512013-10-24 09:20:05 +01001891 int this_order = max_order;
1892 struct page *page;
1893 int to_do;
1894 void *p;
1895
Bartlomiej Zolnierkiewiczb3a834b2016-05-16 09:54:47 -06001896 while (this_order && left < order_to_size(this_order - 1))
Jens Axboe320ae512013-10-24 09:20:05 +01001897 this_order--;
1898
1899 do {
Shaohua Li59f082e2017-02-01 09:53:14 -08001900 page = alloc_pages_node(node,
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001901 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
Jens Axboea5164402014-09-10 09:02:03 -06001902 this_order);
Jens Axboe320ae512013-10-24 09:20:05 +01001903 if (page)
1904 break;
1905 if (!this_order--)
1906 break;
1907 if (order_to_size(this_order) < rq_size)
1908 break;
1909 } while (1);
1910
1911 if (!page)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001912 goto fail;
Jens Axboe320ae512013-10-24 09:20:05 +01001913
1914 page->private = this_order;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001915 list_add_tail(&page->lru, &tags->page_list);
Jens Axboe320ae512013-10-24 09:20:05 +01001916
1917 p = page_address(page);
Catalin Marinasf75782e2015-09-14 18:16:02 +01001918 /*
1919 * Allow kmemleak to scan these pages as they contain pointers
1920		 * to additional allocations made via ops->init_request().
1921 */
Gabriel Krisman Bertazi36e1f3d12016-12-06 13:31:44 -02001922 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
Jens Axboe320ae512013-10-24 09:20:05 +01001923 entries_per_page = order_to_size(this_order) / rq_size;
Jens Axboecc71a6f2017-01-11 14:29:56 -07001924 to_do = min(entries_per_page, depth - i);
Jens Axboe320ae512013-10-24 09:20:05 +01001925 left -= to_do * rq_size;
1926 for (j = 0; j < to_do; j++) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001927 struct request *rq = p;
1928
1929 tags->static_rqs[i] = rq;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001930 if (set->ops->init_request) {
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001931 if (set->ops->init_request(set, rq, hctx_idx,
Shaohua Li59f082e2017-02-01 09:53:14 -08001932 node)) {
Jens Axboe2af8cbe2017-01-13 14:39:30 -07001933 tags->static_rqs[i] = NULL;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001934 goto fail;
Jens Axboea5164402014-09-10 09:02:03 -06001935 }
Christoph Hellwige9b267d2014-04-15 13:59:10 -06001936 }
1937
Jens Axboe320ae512013-10-24 09:20:05 +01001938 p += rq_size;
1939 i++;
1940 }
1941 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07001942 return 0;
Jens Axboe320ae512013-10-24 09:20:05 +01001943
Christoph Hellwig24d2f902014-04-15 14:14:00 -06001944fail:
Jens Axboecc71a6f2017-01-11 14:29:56 -07001945 blk_mq_free_rqs(set, tags, hctx_idx);
1946 return -ENOMEM;
Jens Axboe320ae512013-10-24 09:20:05 +01001947}
1948
Jens Axboee57690f2016-08-24 15:34:35 -06001949/*
1950 * 'cpu' is going away. Splice any existing rq_list entries from this
1951 * software queue to the hw queue dispatch list, and ensure that it
1952 * gets run.
1953 */
Thomas Gleixner9467f852016-09-22 08:05:17 -06001954static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
Jens Axboe484b4062014-05-21 14:01:15 -06001955{
Thomas Gleixner9467f852016-09-22 08:05:17 -06001956 struct blk_mq_hw_ctx *hctx;
Jens Axboe484b4062014-05-21 14:01:15 -06001957 struct blk_mq_ctx *ctx;
1958 LIST_HEAD(tmp);
1959
Thomas Gleixner9467f852016-09-22 08:05:17 -06001960 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
Jens Axboee57690f2016-08-24 15:34:35 -06001961 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
Jens Axboe484b4062014-05-21 14:01:15 -06001962
1963 spin_lock(&ctx->lock);
1964 if (!list_empty(&ctx->rq_list)) {
1965 list_splice_init(&ctx->rq_list, &tmp);
1966 blk_mq_hctx_clear_pending(hctx, ctx);
1967 }
1968 spin_unlock(&ctx->lock);
1969
1970 if (list_empty(&tmp))
Thomas Gleixner9467f852016-09-22 08:05:17 -06001971 return 0;
Jens Axboe484b4062014-05-21 14:01:15 -06001972
Jens Axboee57690f2016-08-24 15:34:35 -06001973 spin_lock(&hctx->lock);
1974 list_splice_tail_init(&tmp, &hctx->dispatch);
1975 spin_unlock(&hctx->lock);
Jens Axboe484b4062014-05-21 14:01:15 -06001976
1977 blk_mq_run_hw_queue(hctx, true);
Thomas Gleixner9467f852016-09-22 08:05:17 -06001978 return 0;
Jens Axboe484b4062014-05-21 14:01:15 -06001979}
1980
Thomas Gleixner9467f852016-09-22 08:05:17 -06001981static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
Jens Axboe484b4062014-05-21 14:01:15 -06001982{
Thomas Gleixner9467f852016-09-22 08:05:17 -06001983 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1984 &hctx->cpuhp_dead);
Jens Axboe484b4062014-05-21 14:01:15 -06001985}
1986
Ming Leic3b4afc2015-06-04 22:25:04 +08001987/* hctx->ctxs will be freed in queue's release handler */
Ming Lei08e98fc2014-09-25 23:23:38 +08001988static void blk_mq_exit_hctx(struct request_queue *q,
1989 struct blk_mq_tag_set *set,
1990 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1991{
Omar Sandoval9c1051a2017-05-04 08:17:21 -06001992 blk_mq_debugfs_unregister_hctx(hctx);
1993
Ming Lei08e98fc2014-09-25 23:23:38 +08001994 blk_mq_tag_idle(hctx);
1995
Ming Leif70ced02014-09-25 23:23:47 +08001996 if (set->ops->exit_request)
Christoph Hellwigd6296d392017-05-01 10:19:08 -06001997 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
Ming Leif70ced02014-09-25 23:23:47 +08001998
Omar Sandoval93252632017-04-05 12:01:31 -07001999 blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2000
Ming Lei08e98fc2014-09-25 23:23:38 +08002001 if (set->ops->exit_hctx)
2002 set->ops->exit_hctx(hctx, hctx_idx);
2003
Bart Van Assche6a83e742016-11-02 10:09:51 -06002004 if (hctx->flags & BLK_MQ_F_BLOCKING)
Bart Van Assche07319672017-06-20 11:15:38 -07002005 cleanup_srcu_struct(hctx->queue_rq_srcu);
Bart Van Assche6a83e742016-11-02 10:09:51 -06002006
Thomas Gleixner9467f852016-09-22 08:05:17 -06002007 blk_mq_remove_cpuhp(hctx);
Ming Leif70ced02014-09-25 23:23:47 +08002008 blk_free_flush_queue(hctx->fq);
Omar Sandoval88459642016-09-17 08:38:44 -06002009 sbitmap_free(&hctx->ctx_map);
Ming Lei08e98fc2014-09-25 23:23:38 +08002010}
2011
Ming Lei624dbe42014-05-27 23:35:13 +08002012static void blk_mq_exit_hw_queues(struct request_queue *q,
2013 struct blk_mq_tag_set *set, int nr_queue)
2014{
2015 struct blk_mq_hw_ctx *hctx;
2016 unsigned int i;
2017
2018 queue_for_each_hw_ctx(q, hctx, i) {
2019 if (i == nr_queue)
2020 break;
Ming Lei08e98fc2014-09-25 23:23:38 +08002021 blk_mq_exit_hctx(q, set, hctx, i);
Ming Lei624dbe42014-05-27 23:35:13 +08002022 }
Ming Lei624dbe42014-05-27 23:35:13 +08002023}
2024
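/*
 * Set up one hardware queue: initialize its lock, dispatch list and run
 * work, register the CPU hotplug notifier, allocate the ctx map and flush
 * queue, and give both the driver (->init_hctx()/->init_request()) and the
 * I/O scheduler a chance to set up their per-hctx state.
 */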
Ming Lei08e98fc2014-09-25 23:23:38 +08002025static int blk_mq_init_hctx(struct request_queue *q,
2026 struct blk_mq_tag_set *set,
2027 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
2028{
2029 int node;
2030
2031 node = hctx->numa_node;
2032 if (node == NUMA_NO_NODE)
2033 node = hctx->numa_node = set->numa_node;
2034
Jens Axboe9f993732017-04-10 09:54:54 -06002035 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
Ming Lei08e98fc2014-09-25 23:23:38 +08002036 spin_lock_init(&hctx->lock);
2037 INIT_LIST_HEAD(&hctx->dispatch);
2038 hctx->queue = q;
Jeff Moyer2404e602015-11-03 10:40:06 -05002039 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
Ming Lei08e98fc2014-09-25 23:23:38 +08002040
Thomas Gleixner9467f852016-09-22 08:05:17 -06002041 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
Ming Lei08e98fc2014-09-25 23:23:38 +08002042
2043 hctx->tags = set->tags[hctx_idx];
2044
2045 /*
2046 * Allocate space for all possible cpus to avoid allocation at
2047 * runtime
2048 */
2049 hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
2050 GFP_KERNEL, node);
2051 if (!hctx->ctxs)
2052 goto unregister_cpu_notifier;
2053
Omar Sandoval88459642016-09-17 08:38:44 -06002054 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
2055 node))
Ming Lei08e98fc2014-09-25 23:23:38 +08002056 goto free_ctxs;
2057
2058 hctx->nr_ctx = 0;
2059
Jens Axboeeb619fd2017-11-09 08:32:43 -07002060 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2061 INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2062
Ming Lei08e98fc2014-09-25 23:23:38 +08002063 if (set->ops->init_hctx &&
2064 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2065 goto free_bitmap;
2066
Omar Sandoval93252632017-04-05 12:01:31 -07002067 if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
2068 goto exit_hctx;
2069
Ming Leif70ced02014-09-25 23:23:47 +08002070 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2071 if (!hctx->fq)
Omar Sandoval93252632017-04-05 12:01:31 -07002072 goto sched_exit_hctx;
Ming Leif70ced02014-09-25 23:23:47 +08002073
2074 if (set->ops->init_request &&
Christoph Hellwigd6296d392017-05-01 10:19:08 -06002075 set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
2076 node))
Ming Leif70ced02014-09-25 23:23:47 +08002077 goto free_fq;
2078
Bart Van Assche6a83e742016-11-02 10:09:51 -06002079 if (hctx->flags & BLK_MQ_F_BLOCKING)
Bart Van Assche07319672017-06-20 11:15:38 -07002080 init_srcu_struct(hctx->queue_rq_srcu);
Bart Van Assche6a83e742016-11-02 10:09:51 -06002081
Omar Sandoval9c1051a2017-05-04 08:17:21 -06002082 blk_mq_debugfs_register_hctx(q, hctx);
2083
Ming Lei08e98fc2014-09-25 23:23:38 +08002084 return 0;
2085
Ming Leif70ced02014-09-25 23:23:47 +08002086 free_fq:
2087 kfree(hctx->fq);
Omar Sandoval93252632017-04-05 12:01:31 -07002088 sched_exit_hctx:
2089 blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
Ming Leif70ced02014-09-25 23:23:47 +08002090 exit_hctx:
2091 if (set->ops->exit_hctx)
2092 set->ops->exit_hctx(hctx, hctx_idx);
Ming Lei08e98fc2014-09-25 23:23:38 +08002093 free_bitmap:
Omar Sandoval88459642016-09-17 08:38:44 -06002094 sbitmap_free(&hctx->ctx_map);
Ming Lei08e98fc2014-09-25 23:23:38 +08002095 free_ctxs:
2096 kfree(hctx->ctxs);
2097 unregister_cpu_notifier:
Thomas Gleixner9467f852016-09-22 08:05:17 -06002098 blk_mq_remove_cpuhp(hctx);
Ming Lei08e98fc2014-09-25 23:23:38 +08002099 return -1;
2100}
2101
Jens Axboe320ae512013-10-24 09:20:05 +01002102static void blk_mq_init_cpu_queues(struct request_queue *q,
2103 unsigned int nr_hw_queues)
2104{
2105 unsigned int i;
2106
2107 for_each_possible_cpu(i) {
2108 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2109 struct blk_mq_hw_ctx *hctx;
2110
Jens Axboe320ae512013-10-24 09:20:05 +01002111 __ctx->cpu = i;
2112 spin_lock_init(&__ctx->lock);
2113 INIT_LIST_HEAD(&__ctx->rq_list);
2114 __ctx->queue = q;
2115
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002116		/* If the cpu isn't present, the cpu is mapped to the first hctx */
2117 if (!cpu_present(i))
Jens Axboe320ae512013-10-24 09:20:05 +01002118 continue;
2119
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02002120 hctx = blk_mq_map_queue(q, i);
Jens Axboee4043dc2014-04-09 10:18:23 -06002121
Jens Axboe320ae512013-10-24 09:20:05 +01002122 /*
2123 * Set local node, IFF we have more than one hw queue. If
2124 * not, we remain on the home node of the device
2125 */
2126 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
Raghavendra K Tbffed452015-12-02 16:59:05 +05302127 hctx->numa_node = local_memory_node(cpu_to_node(i));
Jens Axboe320ae512013-10-24 09:20:05 +01002128 }
2129}
2130
Jens Axboecc71a6f2017-01-11 14:29:56 -07002131static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2132{
2133 int ret = 0;
2134
2135 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2136 set->queue_depth, set->reserved_tags);
2137 if (!set->tags[hctx_idx])
2138 return false;
2139
2140 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2141 set->queue_depth);
2142 if (!ret)
2143 return true;
2144
2145 blk_mq_free_rq_map(set->tags[hctx_idx]);
2146 set->tags[hctx_idx] = NULL;
2147 return false;
2148}
2149
2150static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2151 unsigned int hctx_idx)
2152{
Jens Axboebd166ef2017-01-17 06:03:22 -07002153 if (set->tags[hctx_idx]) {
2154 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2155 blk_mq_free_rq_map(set->tags[hctx_idx]);
2156 set->tags[hctx_idx] = NULL;
2157 }
Jens Axboecc71a6f2017-01-11 14:29:56 -07002158}
2159
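/*
 * (Re)build the mapping between software queues and hardware queues based
 * on set->mq_map. Hardware queues that end up with no software queues
 * mapped to them are disabled and their request maps freed.
 */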
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002160static void blk_mq_map_swqueue(struct request_queue *q)
Jens Axboe320ae512013-10-24 09:20:05 +01002161{
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002162 unsigned int i, hctx_idx;
Jens Axboe320ae512013-10-24 09:20:05 +01002163 struct blk_mq_hw_ctx *hctx;
2164 struct blk_mq_ctx *ctx;
Ming Lei2a34c082015-04-21 10:00:20 +08002165 struct blk_mq_tag_set *set = q->tag_set;
Jens Axboe320ae512013-10-24 09:20:05 +01002166
Akinobu Mita60de0742015-09-27 02:09:25 +09002167 /*
2168	 * Avoid others reading incomplete hctx->cpumask through sysfs
2169 */
2170 mutex_lock(&q->sysfs_lock);
2171
Jens Axboe320ae512013-10-24 09:20:05 +01002172 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboee4043dc2014-04-09 10:18:23 -06002173 cpumask_clear(hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01002174 hctx->nr_ctx = 0;
2175 }
2176
2177 /*
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002178 * Map software to hardware queues.
2179 *
2180	 * If the cpu isn't present, the cpu is mapped to the first hctx.
Jens Axboe320ae512013-10-24 09:20:05 +01002181 */
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002182 for_each_present_cpu(i) {
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002183 hctx_idx = q->mq_map[i];
2184 /* unmapped hw queue can be remapped after CPU topo changed */
Jens Axboecc71a6f2017-01-11 14:29:56 -07002185 if (!set->tags[hctx_idx] &&
2186 !__blk_mq_alloc_rq_map(set, hctx_idx)) {
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002187 /*
2188			 * If tags initialization fails for some hctx,
2189 * that hctx won't be brought online. In this
2190 * case, remap the current ctx to hctx[0] which
2191 * is guaranteed to always have tags allocated
2192 */
Jens Axboecc71a6f2017-01-11 14:29:56 -07002193 q->mq_map[i] = 0;
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002194 }
2195
Thomas Gleixner897bb0c2016-03-19 11:30:33 +01002196 ctx = per_cpu_ptr(q->queue_ctx, i);
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02002197 hctx = blk_mq_map_queue(q, i);
Keith Busch868f2f02015-12-17 17:08:14 -07002198
Jens Axboee4043dc2014-04-09 10:18:23 -06002199 cpumask_set_cpu(i, hctx->cpumask);
Jens Axboe320ae512013-10-24 09:20:05 +01002200 ctx->index_hw = hctx->nr_ctx;
2201 hctx->ctxs[hctx->nr_ctx++] = ctx;
2202 }
Jens Axboe506e9312014-05-07 10:26:44 -06002203
Akinobu Mita60de0742015-09-27 02:09:25 +09002204 mutex_unlock(&q->sysfs_lock);
2205
Jens Axboe506e9312014-05-07 10:26:44 -06002206 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboe484b4062014-05-21 14:01:15 -06002207 /*
Jens Axboea68aafa2014-08-15 13:19:15 -06002208 * If no software queues are mapped to this hardware queue,
2209 * disable it and free the request entries.
Jens Axboe484b4062014-05-21 14:01:15 -06002210 */
2211 if (!hctx->nr_ctx) {
Gabriel Krisman Bertazid1b1cea2016-12-14 18:48:36 -02002212 /* Never unmap queue 0. We need it as a
2213			 * fallback in case allocation fails for a
2214			 * newly remapped queue
2215 */
Jens Axboecc71a6f2017-01-11 14:29:56 -07002216 if (i && set->tags[i])
2217 blk_mq_free_map_and_requests(set, i);
2218
Ming Lei2a34c082015-04-21 10:00:20 +08002219 hctx->tags = NULL;
Jens Axboe484b4062014-05-21 14:01:15 -06002220 continue;
2221 }
2222
Ming Lei2a34c082015-04-21 10:00:20 +08002223 hctx->tags = set->tags[i];
2224 WARN_ON(!hctx->tags);
2225
Jens Axboe484b4062014-05-21 14:01:15 -06002226 /*
Chong Yuan889fa312015-04-15 11:39:29 -06002227 * Set the map size to the number of mapped software queues.
2228 * This is more accurate and more efficient than looping
2229 * over all possibly mapped software queues.
2230 */
Omar Sandoval88459642016-09-17 08:38:44 -06002231 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
Chong Yuan889fa312015-04-15 11:39:29 -06002232
2233 /*
Jens Axboe484b4062014-05-21 14:01:15 -06002234 * Initialize batch roundrobin counts
2235 */
Jens Axboe506e9312014-05-07 10:26:44 -06002236 hctx->next_cpu = cpumask_first(hctx->cpumask);
2237 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2238 }
Jens Axboe320ae512013-10-24 09:20:05 +01002239}
2240
Jens Axboe8e8320c2017-06-20 17:56:13 -06002241/*
2242 * Caller needs to ensure that we're either frozen/quiesced, or that
2243 * the queue isn't live yet.
2244 */
Jeff Moyer2404e602015-11-03 10:40:06 -05002245static void queue_set_hctx_shared(struct request_queue *q, bool shared)
Jens Axboe0d2602c2014-05-13 15:10:52 -06002246{
2247 struct blk_mq_hw_ctx *hctx;
Jens Axboe0d2602c2014-05-13 15:10:52 -06002248 int i;
2249
Jeff Moyer2404e602015-11-03 10:40:06 -05002250 queue_for_each_hw_ctx(q, hctx, i) {
Jens Axboe8e8320c2017-06-20 17:56:13 -06002251 if (shared) {
2252 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2253 atomic_inc(&q->shared_hctx_restart);
Jeff Moyer2404e602015-11-03 10:40:06 -05002254 hctx->flags |= BLK_MQ_F_TAG_SHARED;
Jens Axboe8e8320c2017-06-20 17:56:13 -06002255 } else {
2256 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2257 atomic_dec(&q->shared_hctx_restart);
Jeff Moyer2404e602015-11-03 10:40:06 -05002258 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
Jens Axboe8e8320c2017-06-20 17:56:13 -06002259 }
Jeff Moyer2404e602015-11-03 10:40:06 -05002260 }
2261}
2262
Jens Axboe8e8320c2017-06-20 17:56:13 -06002263static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2264 bool shared)
Jeff Moyer2404e602015-11-03 10:40:06 -05002265{
2266 struct request_queue *q;
Jens Axboe0d2602c2014-05-13 15:10:52 -06002267
Bart Van Assche705cda92017-04-07 11:16:49 -07002268 lockdep_assert_held(&set->tag_list_lock);
2269
Jens Axboe0d2602c2014-05-13 15:10:52 -06002270 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2271 blk_mq_freeze_queue(q);
Jeff Moyer2404e602015-11-03 10:40:06 -05002272 queue_set_hctx_shared(q, shared);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002273 blk_mq_unfreeze_queue(q);
2274 }
2275}
2276
2277static void blk_mq_del_queue_tag_set(struct request_queue *q)
2278{
2279 struct blk_mq_tag_set *set = q->tag_set;
2280
Jens Axboe0d2602c2014-05-13 15:10:52 -06002281 mutex_lock(&set->tag_list_lock);
Bart Van Assche705cda92017-04-07 11:16:49 -07002282 list_del_rcu(&q->tag_set_list);
2283 INIT_LIST_HEAD(&q->tag_set_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05002284 if (list_is_singular(&set->tag_list)) {
2285 /* just transitioned to unshared */
2286 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2287 /* update existing queue */
2288 blk_mq_update_tag_set_depth(set, false);
2289 }
Jens Axboe0d2602c2014-05-13 15:10:52 -06002290 mutex_unlock(&set->tag_list_lock);
Bart Van Assche705cda92017-04-07 11:16:49 -07002291
2292 synchronize_rcu();
Jens Axboe0d2602c2014-05-13 15:10:52 -06002293}
2294
2295static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2296 struct request_queue *q)
2297{
2298 q->tag_set = set;
2299
2300 mutex_lock(&set->tag_list_lock);
Jeff Moyer2404e602015-11-03 10:40:06 -05002301
2302 /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2303 if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2304 set->flags |= BLK_MQ_F_TAG_SHARED;
2305 /* update existing queue */
2306 blk_mq_update_tag_set_depth(set, true);
2307 }
2308 if (set->flags & BLK_MQ_F_TAG_SHARED)
2309 queue_set_hctx_shared(q, true);
Bart Van Assche705cda92017-04-07 11:16:49 -07002310 list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
Jeff Moyer2404e602015-11-03 10:40:06 -05002311
Jens Axboe0d2602c2014-05-13 15:10:52 -06002312 mutex_unlock(&set->tag_list_lock);
2313}
2314
Ming Leie09aae7e2015-01-29 20:17:27 +08002315/*
2316 * This is the actual release handler for mq, but we do it from the
2317 * request queue's release handler to avoid use-after-free and other
2318 * headaches: q->mq_kobj shouldn't have been introduced, but we can't
2319 * group the ctx/kctx kobjects without it.
2320 */
2321void blk_mq_release(struct request_queue *q)
2322{
2323 struct blk_mq_hw_ctx *hctx;
2324 unsigned int i;
2325
2326 /* hctx kobj stays in hctx */
Ming Leic3b4afc2015-06-04 22:25:04 +08002327 queue_for_each_hw_ctx(q, hctx, i) {
2328 if (!hctx)
2329 continue;
Ming Lei6c8b2322017-02-22 18:14:01 +08002330 kobject_put(&hctx->kobj);
Ming Leic3b4afc2015-06-04 22:25:04 +08002331 }
Ming Leie09aae7e2015-01-29 20:17:27 +08002332
Akinobu Mitaa723bab2015-09-27 02:09:21 +09002333 q->mq_map = NULL;
2334
Ming Leie09aae7e2015-01-29 20:17:27 +08002335 kfree(q->queue_hw_ctx);
2336
Ming Lei7ea5fe32017-02-22 18:14:00 +08002337 /*
2338	 * Release q->mq_kobj and the sw queues' kobjects now because
2339	 * both share their lifetime with the request queue.
2340 */
2341 blk_mq_sysfs_deinit(q);
2342
Ming Leie09aae7e2015-01-29 20:17:27 +08002343 free_percpu(q->queue_ctx);
2344}
2345
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002346struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
Jens Axboe320ae512013-10-24 09:20:05 +01002347{
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002348 struct request_queue *uninit_q, *q;
2349
2350 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2351 if (!uninit_q)
2352 return ERR_PTR(-ENOMEM);
2353
2354 q = blk_mq_init_allocated_queue(set, uninit_q);
2355 if (IS_ERR(q))
2356 blk_cleanup_queue(uninit_q);
2357
2358 return q;
2359}
2360EXPORT_SYMBOL(blk_mq_init_queue);
2361
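/*
 * Size to allocate for a hardware queue structure: drivers that set
 * BLK_MQ_F_BLOCKING get an srcu_struct appended for queue_rq_srcu, hence
 * the BUILD_BUG_ON() checking that it really sits at the end of the struct.
 */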
Bart Van Assche07319672017-06-20 11:15:38 -07002362static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2363{
2364 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2365
2366 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
2367 __alignof__(struct blk_mq_hw_ctx)) !=
2368 sizeof(struct blk_mq_hw_ctx));
2369
2370 if (tag_set->flags & BLK_MQ_F_BLOCKING)
2371 hw_ctx_size += sizeof(struct srcu_struct);
2372
2373 return hw_ctx_size;
2374}
2375
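/*
 * Allocate (or reallocate, after nr_hw_queues changed) the hardware queue
 * structures. Any hctx beyond the new count is torn down; on allocation
 * failure q->nr_hw_queues is simply capped at the number that succeeded.
 */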
Keith Busch868f2f02015-12-17 17:08:14 -07002376static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2377 struct request_queue *q)
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002378{
Keith Busch868f2f02015-12-17 17:08:14 -07002379 int i, j;
2380 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
Jens Axboe320ae512013-10-24 09:20:05 +01002381
Keith Busch868f2f02015-12-17 17:08:14 -07002382 blk_mq_sysfs_unregister(q);
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002383 for (i = 0; i < set->nr_hw_queues; i++) {
Keith Busch868f2f02015-12-17 17:08:14 -07002384 int node;
Jens Axboef14bbe72014-05-27 12:06:53 -06002385
Keith Busch868f2f02015-12-17 17:08:14 -07002386 if (hctxs[i])
2387 continue;
2388
2389 node = blk_mq_hw_queue_to_node(q->mq_map, i);
Bart Van Assche07319672017-06-20 11:15:38 -07002390 hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
Christoph Hellwigcdef54d2014-05-28 18:11:06 +02002391 GFP_KERNEL, node);
Jens Axboe320ae512013-10-24 09:20:05 +01002392 if (!hctxs[i])
Keith Busch868f2f02015-12-17 17:08:14 -07002393 break;
Jens Axboe320ae512013-10-24 09:20:05 +01002394
Jens Axboea86073e2014-10-13 15:41:54 -06002395 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
Keith Busch868f2f02015-12-17 17:08:14 -07002396 node)) {
2397 kfree(hctxs[i]);
2398 hctxs[i] = NULL;
2399 break;
2400 }
Jens Axboee4043dc2014-04-09 10:18:23 -06002401
Jens Axboe0d2602c2014-05-13 15:10:52 -06002402 atomic_set(&hctxs[i]->nr_active, 0);
Jens Axboef14bbe72014-05-27 12:06:53 -06002403 hctxs[i]->numa_node = node;
Jens Axboe320ae512013-10-24 09:20:05 +01002404 hctxs[i]->queue_num = i;
Keith Busch868f2f02015-12-17 17:08:14 -07002405
2406 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2407 free_cpumask_var(hctxs[i]->cpumask);
2408 kfree(hctxs[i]);
2409 hctxs[i] = NULL;
2410 break;
2411 }
2412 blk_mq_hctx_kobj_init(hctxs[i]);
Jens Axboe320ae512013-10-24 09:20:05 +01002413 }
Keith Busch868f2f02015-12-17 17:08:14 -07002414 for (j = i; j < q->nr_hw_queues; j++) {
2415 struct blk_mq_hw_ctx *hctx = hctxs[j];
2416
2417 if (hctx) {
Jens Axboecc71a6f2017-01-11 14:29:56 -07002418 if (hctx->tags)
2419 blk_mq_free_map_and_requests(set, j);
Keith Busch868f2f02015-12-17 17:08:14 -07002420 blk_mq_exit_hctx(q, set, hctx, j);
Keith Busch868f2f02015-12-17 17:08:14 -07002421 kobject_put(&hctx->kobj);
Keith Busch868f2f02015-12-17 17:08:14 -07002422 hctxs[j] = NULL;
2423
2424 }
2425 }
2426 q->nr_hw_queues = i;
2427 blk_mq_sysfs_register(q);
2428}
2429
2430struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2431 struct request_queue *q)
2432{
Ming Lei66841672016-02-12 15:27:00 +08002433 /* mark the queue as mq asap */
2434 q->mq_ops = set->ops;
2435
Omar Sandoval34dbad52017-03-21 08:56:08 -07002436 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
Stephen Bates720b8cc2017-04-07 06:24:03 -06002437 blk_mq_poll_stats_bkt,
2438 BLK_MQ_POLL_STATS_BKTS, q);
Omar Sandoval34dbad52017-03-21 08:56:08 -07002439 if (!q->poll_cb)
2440 goto err_exit;
2441
Keith Busch868f2f02015-12-17 17:08:14 -07002442 q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2443 if (!q->queue_ctx)
Ming Linc7de5722016-05-25 23:23:27 -07002444 goto err_exit;
Keith Busch868f2f02015-12-17 17:08:14 -07002445
Ming Lei737f98c2017-02-22 18:13:59 +08002446 /* init q->mq_kobj and sw queues' kobjects */
2447 blk_mq_sysfs_init(q);
2448
Keith Busch868f2f02015-12-17 17:08:14 -07002449 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2450 GFP_KERNEL, set->numa_node);
2451 if (!q->queue_hw_ctx)
2452 goto err_percpu;
2453
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002454 q->mq_map = set->mq_map;
Keith Busch868f2f02015-12-17 17:08:14 -07002455
2456 blk_mq_realloc_hw_ctxs(set, q);
2457 if (!q->nr_hw_queues)
2458 goto err_hctxs;
Jens Axboe320ae512013-10-24 09:20:05 +01002459
Christoph Hellwig287922e2015-10-30 20:57:30 +08002460 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
Ming Leie56f6982015-07-16 19:53:22 +08002461 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
Jens Axboe320ae512013-10-24 09:20:05 +01002462
2463 q->nr_queues = nr_cpu_ids;
Jens Axboe320ae512013-10-24 09:20:05 +01002464
Jens Axboe94eddfb2013-11-19 09:25:07 -07002465 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
Jens Axboe320ae512013-10-24 09:20:05 +01002466
Jens Axboe05f1dd52014-05-29 09:53:32 -06002467 if (!(set->flags & BLK_MQ_F_SG_MERGE))
2468 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2469
Christoph Hellwig1be036e2014-02-07 10:22:39 -08002470 q->sg_reserved_size = INT_MAX;
2471
Mike Snitzer28494502016-09-14 13:28:30 -04002472 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
Christoph Hellwig6fca6a62014-05-28 08:08:02 -06002473 INIT_LIST_HEAD(&q->requeue_list);
2474 spin_lock_init(&q->requeue_lock);
2475
Christoph Hellwig254d2592017-03-22 15:01:50 -04002476 blk_queue_make_request(q, blk_mq_make_request);
Christoph Hellwigea435e12017-11-02 21:29:54 +03002477 if (q->mq_ops->poll)
2478 q->poll_fn = blk_mq_poll;
Jens Axboe07068d52014-05-22 10:40:51 -06002479
Jens Axboeeba71762014-05-20 15:17:27 -06002480 /*
2481 * Do this after blk_queue_make_request() overrides it...
2482 */
2483 q->nr_requests = set->queue_depth;
2484
Jens Axboe64f1c212016-11-14 13:03:03 -07002485 /*
2486 * Default to classic polling
2487 */
2488 q->poll_nsec = -1;
2489
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002490 if (set->ops->complete)
2491 blk_queue_softirq_done(q, set->ops->complete);
Christoph Hellwig30a91cb2014-02-10 03:24:38 -08002492
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002493 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
Jens Axboe0d2602c2014-05-13 15:10:52 -06002494 blk_mq_add_queue_tag_set(set, q);
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002495 blk_mq_map_swqueue(q);
Akinobu Mita4593fdb2015-09-27 02:09:20 +09002496
Jens Axboed3484992017-01-13 14:43:58 -07002497 if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2498 int ret;
2499
2500 ret = blk_mq_sched_init(q);
2501 if (ret)
2502 return ERR_PTR(ret);
2503 }
2504
Jens Axboe320ae512013-10-24 09:20:05 +01002505 return q;
Christoph Hellwig18741982014-02-10 09:29:00 -07002506
Jens Axboe320ae512013-10-24 09:20:05 +01002507err_hctxs:
Keith Busch868f2f02015-12-17 17:08:14 -07002508 kfree(q->queue_hw_ctx);
Jens Axboe320ae512013-10-24 09:20:05 +01002509err_percpu:
Keith Busch868f2f02015-12-17 17:08:14 -07002510 free_percpu(q->queue_ctx);
Ming Linc7de5722016-05-25 23:23:27 -07002511err_exit:
2512 q->mq_ops = NULL;
Jens Axboe320ae512013-10-24 09:20:05 +01002513 return ERR_PTR(-ENOMEM);
2514}
Mike Snitzerb62c21b2015-03-12 23:56:02 -04002515EXPORT_SYMBOL(blk_mq_init_allocated_queue);
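
/*
 * Usage note (illustrative sketch, not part of the original file): most
 * drivers do not call blk_mq_init_allocated_queue() directly; they call
 * blk_mq_init_queue(), which allocates the request_queue and then hands
 * it to this function, e.g.
 *
 *	q = blk_mq_init_queue(set);
 *
 * Calling it directly only makes sense when the caller allocates and
 * owns the request_queue itself.
 */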
Jens Axboe320ae512013-10-24 09:20:05 +01002516
2517void blk_mq_free_queue(struct request_queue *q)
2518{
Ming Lei624dbe42014-05-27 23:35:13 +08002519 struct blk_mq_tag_set *set = q->tag_set;
Jens Axboe320ae512013-10-24 09:20:05 +01002520
Jens Axboe0d2602c2014-05-13 15:10:52 -06002521 blk_mq_del_queue_tag_set(q);
Ming Lei624dbe42014-05-27 23:35:13 +08002522 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
Jens Axboe320ae512013-10-24 09:20:05 +01002523}
Jens Axboe320ae512013-10-24 09:20:05 +01002524
2525/* Basically redo blk_mq_init_queue with queue frozen */
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002526static void blk_mq_queue_reinit(struct request_queue *q)
Jens Axboe320ae512013-10-24 09:20:05 +01002527{
Christoph Hellwig4ecd4fe2015-05-07 09:38:13 +02002528 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
Jens Axboe320ae512013-10-24 09:20:05 +01002529
Omar Sandoval9c1051a2017-05-04 08:17:21 -06002530 blk_mq_debugfs_unregister_hctxs(q);
Jens Axboe67aec142014-05-30 08:25:36 -06002531 blk_mq_sysfs_unregister(q);
2532
Jens Axboe320ae512013-10-24 09:20:05 +01002533 /*
2534 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2535 * we should change hctx numa_node according to new topology (this
 2536	 * involves freeing and re-allocating memory; is it worth doing?)
2537 */
2538
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002539 blk_mq_map_swqueue(q);
Jens Axboe320ae512013-10-24 09:20:05 +01002540
Jens Axboe67aec142014-05-30 08:25:36 -06002541 blk_mq_sysfs_register(q);
Omar Sandoval9c1051a2017-05-04 08:17:21 -06002542 blk_mq_debugfs_register_hctxs(q);
Jens Axboe320ae512013-10-24 09:20:05 +01002543}
2544
Jens Axboea5164402014-09-10 09:02:03 -06002545static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2546{
2547 int i;
2548
Jens Axboecc71a6f2017-01-11 14:29:56 -07002549 for (i = 0; i < set->nr_hw_queues; i++)
2550 if (!__blk_mq_alloc_rq_map(set, i))
Jens Axboea5164402014-09-10 09:02:03 -06002551 goto out_unwind;
Jens Axboea5164402014-09-10 09:02:03 -06002552
2553 return 0;
2554
2555out_unwind:
2556 while (--i >= 0)
Jens Axboecc71a6f2017-01-11 14:29:56 -07002557 blk_mq_free_rq_map(set->tags[i]);
Jens Axboea5164402014-09-10 09:02:03 -06002558
Jens Axboea5164402014-09-10 09:02:03 -06002559 return -ENOMEM;
2560}
2561
2562/*
2563 * Allocate the request maps associated with this tag_set. Note that this
2564 * may reduce the depth asked for, if memory is tight. set->queue_depth
2565 * will be updated to reflect the allocated depth.
2566 */
2567static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2568{
2569 unsigned int depth;
2570 int err;
2571
2572 depth = set->queue_depth;
2573 do {
2574 err = __blk_mq_alloc_rq_maps(set);
2575 if (!err)
2576 break;
2577
2578 set->queue_depth >>= 1;
2579 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2580 err = -ENOMEM;
2581 break;
2582 }
2583 } while (set->queue_depth);
2584
2585 if (!set->queue_depth || err) {
2586 pr_err("blk-mq: failed to allocate request map\n");
2587 return -ENOMEM;
2588 }
2589
2590 if (depth != set->queue_depth)
2591 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2592 depth, set->queue_depth);
2593
2594 return 0;
2595}
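
/*
 * Worked example of the fallback above (illustrative numbers only): a
 * requested set->queue_depth of 256 that cannot be allocated is retried
 * at 128, then 64, and so on; the loop stops either when an allocation
 * succeeds or when the depth would fall below set->reserved_tags +
 * BLK_MQ_TAG_MIN, in which case -ENOMEM is returned.
 */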
2596
Omar Sandovalebe8bdd2017-04-07 08:53:11 -06002597static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2598{
2599 if (set->ops->map_queues)
2600 return set->ops->map_queues(set);
2601 else
2602 return blk_mq_map_queues(set);
2603}
2604
Jens Axboea4391c62014-06-05 15:21:56 -06002605/*
2606 * Alloc a tag set to be associated with one or more request queues.
2607 * May fail with EINVAL for various error conditions. May adjust the
 2608	 * requested depth down, if it is too large. In that case, the
 2609	 * adjusted value will be stored in set->queue_depth.
2610 */
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002611int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2612{
Christoph Hellwigda695ba2016-09-14 16:18:55 +02002613 int ret;
2614
Bart Van Assche205fb5f2014-10-30 14:45:11 +01002615 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2616
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002617 if (!set->nr_hw_queues)
2618 return -EINVAL;
Jens Axboea4391c62014-06-05 15:21:56 -06002619 if (!set->queue_depth)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002620 return -EINVAL;
2621 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2622 return -EINVAL;
2623
Christoph Hellwig7d7e0f92016-09-14 16:18:54 +02002624 if (!set->ops->queue_rq)
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002625 return -EINVAL;
2626
Ming Leide148292017-10-14 17:22:29 +08002627 if (!set->ops->get_budget ^ !set->ops->put_budget)
2628 return -EINVAL;
2629
Jens Axboea4391c62014-06-05 15:21:56 -06002630 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2631 pr_info("blk-mq: reduced tag depth to %u\n",
2632 BLK_MQ_MAX_DEPTH);
2633 set->queue_depth = BLK_MQ_MAX_DEPTH;
2634 }
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002635
Shaohua Li6637fad2014-11-30 16:00:58 -08002636 /*
2637 * If a crashdump is active, then we are potentially in a very
2638 * memory constrained environment. Limit us to 1 queue and
2639 * 64 tags to prevent using too much memory.
2640 */
2641 if (is_kdump_kernel()) {
2642 set->nr_hw_queues = 1;
2643 set->queue_depth = min(64U, set->queue_depth);
2644 }
Keith Busch868f2f02015-12-17 17:08:14 -07002645 /*
2646 * There is no use for more h/w queues than cpus.
2647 */
2648 if (set->nr_hw_queues > nr_cpu_ids)
2649 set->nr_hw_queues = nr_cpu_ids;
Shaohua Li6637fad2014-11-30 16:00:58 -08002650
Keith Busch868f2f02015-12-17 17:08:14 -07002651 set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002652 GFP_KERNEL, set->numa_node);
2653 if (!set->tags)
Jens Axboea5164402014-09-10 09:02:03 -06002654 return -ENOMEM;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002655
Christoph Hellwigda695ba2016-09-14 16:18:55 +02002656 ret = -ENOMEM;
2657 set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2658 GFP_KERNEL, set->numa_node);
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002659 if (!set->mq_map)
2660 goto out_free_tags;
2661
Omar Sandovalebe8bdd2017-04-07 08:53:11 -06002662 ret = blk_mq_update_queue_map(set);
Christoph Hellwigda695ba2016-09-14 16:18:55 +02002663 if (ret)
2664 goto out_free_mq_map;
2665
2666 ret = blk_mq_alloc_rq_maps(set);
2667 if (ret)
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002668 goto out_free_mq_map;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002669
Jens Axboe0d2602c2014-05-13 15:10:52 -06002670 mutex_init(&set->tag_list_lock);
2671 INIT_LIST_HEAD(&set->tag_list);
2672
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002673 return 0;
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002674
2675out_free_mq_map:
2676 kfree(set->mq_map);
2677 set->mq_map = NULL;
2678out_free_tags:
Robert Elliott5676e7b2014-09-02 11:38:44 -05002679 kfree(set->tags);
2680 set->tags = NULL;
Christoph Hellwigda695ba2016-09-14 16:18:55 +02002681 return ret;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002682}
2683EXPORT_SYMBOL(blk_mq_alloc_tag_set);
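
/*
 * Illustrative usage sketch (hypothetical driver code, not part of the
 * original file; "my_mq_ops", "my_dev" and "struct my_cmd" are made-up
 * names): a driver typically fills in its tag set once and then creates
 * the queue(s) from it:
 *
 *	memset(&my_dev->tag_set, 0, sizeof(my_dev->tag_set));
 *	my_dev->tag_set.ops		= &my_mq_ops;
 *	my_dev->tag_set.nr_hw_queues	= 1;
 *	my_dev->tag_set.queue_depth	= 64;
 *	my_dev->tag_set.numa_node	= NUMA_NO_NODE;
 *	my_dev->tag_set.cmd_size	= sizeof(struct my_cmd);
 *	my_dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *	err = blk_mq_alloc_tag_set(&my_dev->tag_set);
 *	if (!err)
 *		q = blk_mq_init_queue(&my_dev->tag_set);
 */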
2684
2685void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2686{
2687 int i;
2688
Jens Axboecc71a6f2017-01-11 14:29:56 -07002689 for (i = 0; i < nr_cpu_ids; i++)
2690 blk_mq_free_map_and_requests(set, i);
Jens Axboe484b4062014-05-21 14:01:15 -06002691
Christoph Hellwigbdd17e72016-09-14 16:18:53 +02002692 kfree(set->mq_map);
2693 set->mq_map = NULL;
2694
Ming Lei981bd182014-04-24 00:07:34 +08002695 kfree(set->tags);
Robert Elliott5676e7b2014-09-02 11:38:44 -05002696 set->tags = NULL;
Christoph Hellwig24d2f902014-04-15 14:14:00 -06002697}
2698EXPORT_SYMBOL(blk_mq_free_tag_set);
2699
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002700int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2701{
2702 struct blk_mq_tag_set *set = q->tag_set;
2703 struct blk_mq_hw_ctx *hctx;
2704 int i, ret;
2705
Jens Axboebd166ef2017-01-17 06:03:22 -07002706 if (!set)
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002707 return -EINVAL;
2708
Jens Axboe70f36b62017-01-19 10:59:07 -07002709 blk_mq_freeze_queue(q);
Jens Axboe70f36b62017-01-19 10:59:07 -07002710
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002711 ret = 0;
2712 queue_for_each_hw_ctx(q, hctx, i) {
Keith Busche9137d42016-02-18 14:56:35 -07002713 if (!hctx->tags)
2714 continue;
Jens Axboebd166ef2017-01-17 06:03:22 -07002715 /*
2716 * If we're using an MQ scheduler, just update the scheduler
2717 * queue depth. This is similar to what the old code would do.
2718 */
Jens Axboe70f36b62017-01-19 10:59:07 -07002719 if (!hctx->sched_tags) {
weiping zhangc2e82a22017-09-22 23:36:28 +08002720 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
Jens Axboe70f36b62017-01-19 10:59:07 -07002721 false);
2722 } else {
2723 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2724 nr, true);
2725 }
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002726 if (ret)
2727 break;
2728 }
2729
2730 if (!ret)
2731 q->nr_requests = nr;
2732
Jens Axboe70f36b62017-01-19 10:59:07 -07002733 blk_mq_unfreeze_queue(q);
Jens Axboe70f36b62017-01-19 10:59:07 -07002734
Jens Axboee3a2b3f2014-05-20 11:49:02 -06002735 return ret;
2736}
2737
Keith Busche4dc2b32017-05-30 14:39:11 -04002738static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
2739 int nr_hw_queues)
Keith Busch868f2f02015-12-17 17:08:14 -07002740{
2741 struct request_queue *q;
2742
Bart Van Assche705cda92017-04-07 11:16:49 -07002743 lockdep_assert_held(&set->tag_list_lock);
2744
Keith Busch868f2f02015-12-17 17:08:14 -07002745 if (nr_hw_queues > nr_cpu_ids)
2746 nr_hw_queues = nr_cpu_ids;
2747 if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2748 return;
2749
2750 list_for_each_entry(q, &set->tag_list, tag_set_list)
2751 blk_mq_freeze_queue(q);
2752
2753 set->nr_hw_queues = nr_hw_queues;
Omar Sandovalebe8bdd2017-04-07 08:53:11 -06002754 blk_mq_update_queue_map(set);
Keith Busch868f2f02015-12-17 17:08:14 -07002755 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2756 blk_mq_realloc_hw_ctxs(set, q);
Christoph Hellwig4b855ad2017-06-26 12:20:57 +02002757 blk_mq_queue_reinit(q);
Keith Busch868f2f02015-12-17 17:08:14 -07002758 }
2759
2760 list_for_each_entry(q, &set->tag_list, tag_set_list)
2761 blk_mq_unfreeze_queue(q);
2762}
Keith Busche4dc2b32017-05-30 14:39:11 -04002763
2764void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2765{
2766 mutex_lock(&set->tag_list_lock);
2767 __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
2768 mutex_unlock(&set->tag_list_lock);
2769}
Keith Busch868f2f02015-12-17 17:08:14 -07002770EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
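
/*
 * Illustrative caller sketch (hypothetical, not part of the original
 * file): a driver that discovers a different number of usable hardware
 * queues after a controller reset might call
 *
 *	blk_mq_update_nr_hw_queues(&my_dev->tag_set, nr_usable_queues);
 *
 * with its queues still registered on the tag set; as shown above, the
 * helper freezes every queue on the set, updates the queue map,
 * reallocates the hardware contexts and unfreezes the queues again.
 */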
2771
Omar Sandoval34dbad52017-03-21 08:56:08 -07002772/* Enable polling stats and return whether they were already enabled. */
2773static bool blk_poll_stats_enable(struct request_queue *q)
2774{
2775 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2776 test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
2777 return true;
2778 blk_stat_add_callback(q, q->poll_cb);
2779 return false;
2780}
2781
2782static void blk_mq_poll_stats_start(struct request_queue *q)
2783{
2784 /*
2785 * We don't arm the callback if polling stats are not enabled or the
2786 * callback is already active.
2787 */
2788 if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2789 blk_stat_is_active(q->poll_cb))
2790 return;
2791
2792 blk_stat_activate_msecs(q->poll_cb, 100);
2793}
2794
2795static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
2796{
2797 struct request_queue *q = cb->data;
Stephen Bates720b8cc2017-04-07 06:24:03 -06002798 int bucket;
Omar Sandoval34dbad52017-03-21 08:56:08 -07002799
Stephen Bates720b8cc2017-04-07 06:24:03 -06002800 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
2801 if (cb->stat[bucket].nr_samples)
2802 q->poll_stat[bucket] = cb->stat[bucket];
2803 }
Omar Sandoval34dbad52017-03-21 08:56:08 -07002804}
2805
Jens Axboe64f1c212016-11-14 13:03:03 -07002806static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2807 struct blk_mq_hw_ctx *hctx,
2808 struct request *rq)
2809{
Jens Axboe64f1c212016-11-14 13:03:03 -07002810 unsigned long ret = 0;
Stephen Bates720b8cc2017-04-07 06:24:03 -06002811 int bucket;
Jens Axboe64f1c212016-11-14 13:03:03 -07002812
2813 /*
2814 * If stats collection isn't on, don't sleep but turn it on for
2815 * future users
2816 */
Omar Sandoval34dbad52017-03-21 08:56:08 -07002817 if (!blk_poll_stats_enable(q))
Jens Axboe64f1c212016-11-14 13:03:03 -07002818 return 0;
2819
2820 /*
Jens Axboe64f1c212016-11-14 13:03:03 -07002821 * As an optimistic guess, use half of the mean service time
2822 * for this type of request. We can (and should) make this smarter.
2823 * For instance, if the completion latencies are tight, we can
2824 * get closer than just half the mean. This is especially
2825 * important on devices where the completion latencies are longer
Stephen Bates720b8cc2017-04-07 06:24:03 -06002826 * than ~10 usec. We do use the stats for the relevant IO size
 2827	 * if available, which leads to better estimates.
Jens Axboe64f1c212016-11-14 13:03:03 -07002828 */
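	/*
	 * Worked example (illustrative numbers only): with a mean
	 * completion time of 20 usec recorded in the matching bucket,
	 * the hybrid poll sleeps for roughly (20000 + 1) / 2 nsec,
	 * i.e. about 10 usec, before falling back to busy polling.
	 */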
Stephen Bates720b8cc2017-04-07 06:24:03 -06002829 bucket = blk_mq_poll_stats_bkt(rq);
2830 if (bucket < 0)
2831 return ret;
2832
2833 if (q->poll_stat[bucket].nr_samples)
2834 ret = (q->poll_stat[bucket].mean + 1) / 2;
Jens Axboe64f1c212016-11-14 13:03:03 -07002835
2836 return ret;
2837}
2838
Jens Axboe06426ad2016-11-14 13:01:59 -07002839static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
Jens Axboe64f1c212016-11-14 13:03:03 -07002840 struct blk_mq_hw_ctx *hctx,
Jens Axboe06426ad2016-11-14 13:01:59 -07002841 struct request *rq)
2842{
2843 struct hrtimer_sleeper hs;
2844 enum hrtimer_mode mode;
Jens Axboe64f1c212016-11-14 13:03:03 -07002845 unsigned int nsecs;
Jens Axboe06426ad2016-11-14 13:01:59 -07002846 ktime_t kt;
2847
Jens Axboe64f1c212016-11-14 13:03:03 -07002848 if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2849 return false;
2850
2851 /*
2852 * poll_nsec can be:
2853 *
2854 * -1: don't ever hybrid sleep
2855 * 0: use half of prev avg
2856 * >0: use this specific value
2857 */
2858 if (q->poll_nsec == -1)
2859 return false;
2860 else if (q->poll_nsec > 0)
2861 nsecs = q->poll_nsec;
2862 else
2863 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2864
2865 if (!nsecs)
Jens Axboe06426ad2016-11-14 13:01:59 -07002866 return false;
2867
2868 set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2869
2870 /*
 2871	 * The pre-sleep target comes from blk_mq_poll_nsecs() above (about
 2872	 * half the observed mean completion time) or from q->poll_nsec.
2873 */
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01002874 kt = nsecs;
Jens Axboe06426ad2016-11-14 13:01:59 -07002875
2876 mode = HRTIMER_MODE_REL;
2877 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2878 hrtimer_set_expires(&hs.timer, kt);
2879
2880 hrtimer_init_sleeper(&hs, current);
2881 do {
2882 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2883 break;
2884 set_current_state(TASK_UNINTERRUPTIBLE);
2885 hrtimer_start_expires(&hs.timer, mode);
2886 if (hs.task)
2887 io_schedule();
2888 hrtimer_cancel(&hs.timer);
2889 mode = HRTIMER_MODE_ABS;
2890 } while (hs.task && !signal_pending(current));
2891
2892 __set_current_state(TASK_RUNNING);
2893 destroy_hrtimer_on_stack(&hs.timer);
2894 return true;
2895}
2896
Jens Axboebbd7bb72016-11-04 09:34:34 -06002897static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2898{
2899 struct request_queue *q = hctx->queue;
2900 long state;
2901
Jens Axboe06426ad2016-11-14 13:01:59 -07002902 /*
2903 * If we sleep, have the caller restart the poll loop to reset
2904 * the state. Like for the other success return cases, the
2905 * caller is responsible for checking if the IO completed. If
2906 * the IO isn't complete, we'll get called again and will go
2907 * straight to the busy poll loop.
2908 */
Jens Axboe64f1c212016-11-14 13:03:03 -07002909 if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
Jens Axboe06426ad2016-11-14 13:01:59 -07002910 return true;
2911
Jens Axboebbd7bb72016-11-04 09:34:34 -06002912 hctx->poll_considered++;
2913
2914 state = current->state;
2915 while (!need_resched()) {
2916 int ret;
2917
2918 hctx->poll_invoked++;
2919
2920 ret = q->mq_ops->poll(hctx, rq->tag);
2921 if (ret > 0) {
2922 hctx->poll_success++;
2923 set_current_state(TASK_RUNNING);
2924 return true;
2925 }
2926
2927 if (signal_pending_state(state, current))
2928 set_current_state(TASK_RUNNING);
2929
2930 if (current->state == TASK_RUNNING)
2931 return true;
2932 if (ret < 0)
2933 break;
2934 cpu_relax();
2935 }
2936
2937 return false;
2938}
2939
Christoph Hellwigea435e12017-11-02 21:29:54 +03002940static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
Jens Axboebbd7bb72016-11-04 09:34:34 -06002941{
2942 struct blk_mq_hw_ctx *hctx;
Jens Axboebbd7bb72016-11-04 09:34:34 -06002943 struct request *rq;
2944
Christoph Hellwigea435e12017-11-02 21:29:54 +03002945 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
Jens Axboebbd7bb72016-11-04 09:34:34 -06002946 return false;
2947
Jens Axboebbd7bb72016-11-04 09:34:34 -06002948 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
Jens Axboebd166ef2017-01-17 06:03:22 -07002949 if (!blk_qc_t_is_internal(cookie))
2950 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
Jens Axboe3a07bb12017-04-20 14:53:28 -06002951 else {
Jens Axboebd166ef2017-01-17 06:03:22 -07002952 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
Jens Axboe3a07bb12017-04-20 14:53:28 -06002953 /*
2954 * With scheduling, if the request has completed, we'll
2955 * get a NULL return here, as we clear the sched tag when
2956 * that happens. The request still remains valid, like always,
2957 * so we should be safe with just the NULL check.
2958 */
2959 if (!rq)
2960 return false;
2961 }
Jens Axboebbd7bb72016-11-04 09:34:34 -06002962
2963 return __blk_mq_poll(hctx, rq);
2964}
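
/*
 * Illustrative call path (sketch, not part of the original file): a
 * polling submitter saves the blk_qc_t cookie returned at submission
 * time and later passes it to blk_poll(), which reaches blk_mq_poll()
 * through q->poll_fn (installed in blk_mq_init_allocated_queue() above)
 * once the queue has QUEUE_FLAG_POLL set.
 */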
Jens Axboebbd7bb72016-11-04 09:34:34 -06002965
Jens Axboe320ae512013-10-24 09:20:05 +01002966static int __init blk_mq_init(void)
2967{
Jens Axboefc134572017-10-04 11:22:24 -06002968 /*
2969 * See comment in block/blk.h rq_atomic_flags enum
2970 */
2971 BUILD_BUG_ON((REQ_ATOM_STARTED / BITS_PER_BYTE) !=
2972 (REQ_ATOM_COMPLETE / BITS_PER_BYTE));
2973
Thomas Gleixner9467f852016-09-22 08:05:17 -06002974 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2975 blk_mq_hctx_notify_dead);
Jens Axboe320ae512013-10-24 09:20:05 +01002976 return 0;
2977}
2978subsys_initcall(blk_mq_init);