/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif

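/*
 * Report the default for whether request-based DM uses blk-mq: set at
 * compile time via CONFIG_DM_MQ_DEFAULT, overridable through the
 * use_blk_mq module parameter.
 */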
bool dm_use_blk_mq_default(void)
{
	return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}

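/* Restart a stopped .request_fn request_queue under its queue_lock. */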
static void dm_old_start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_start_queue(struct request_queue *q)
{
	blk_mq_start_stopped_hw_queues(q, true);
	blk_mq_kick_requeue_list(q);
}

void dm_start_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_start_queue(q);
	else
		dm_mq_start_queue(q);
}

static void dm_old_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!blk_queue_stopped(q))
		blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_stop_hw_queues(q);
}

void dm_stop_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_stop_queue(q);
	else
		dm_mq_stop_queue(q);
}

static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
						gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_old_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}

static struct request *alloc_old_clone_request(struct mapped_device *md,
					       gfp_t gfp_mask)
{
	return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_old_clone_request(struct mapped_device *md, struct request *rq)
{
	mempool_free(rq, md->rq_pool);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	int error = clone->bi_error;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list, so the completing
	 * bio should always be rq->bio.  If it's not, something is wrong.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

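/*
 * The tio lives in the request's pdu for blk-mq devices, or in rq->special
 * for the old .request_fn path.
 */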
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}

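/* If dm-stats is in use, account the completed original request's duration. */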
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (!md->queue->mq_ops && run_queue)
		blk_run_queue_async(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;

	blk_rq_unprep_clone(clone);

	/*
	 * It is possible for a clone_old_rq() allocated clone to
	 * get passed in -- it may not yet have a request_queue.
	 * This is known to occur if the error target replaces
	 * a multipath target that has a request_fn queue stacked
	 * on blk-mq queue(s).
	 */
	if (clone->q && clone->q->mq_ops)
		/* stacked on blk-mq queue(s) */
		tio->ti->type->release_clone_rq(clone);
	else if (!md->queue->mq_ops)
		/* request_fn queue stacked on request_fn queue(s) */
		free_old_clone_request(md, clone);

	if (!md->queue->mq_ops)
		free_old_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}

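/*
 * Undo request preparation: release the clone (or the old-path tio) and
 * clear per-request state so the original request can be requeued.
 */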
static void dm_unprep_request(struct request *rq)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!rq->q->mq_ops) {
		rq->special = NULL;
		rq->rq_flags &= ~RQF_DONTPREP;
	}

	if (clone)
		free_rq_clone(clone);
	else if (!tio->md->queue->mq_ops)
		free_old_rq_tio(tio);
}

/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	blk_run_queue_async(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	int rw = rq_data_dir(rq);

	rq_end_stats(md, rq);
	dm_unprep_request(rq);

	if (!rq->q->mq_ops)
		dm_old_requeue_request(rq);
	else
		dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);

	rq_completed(md, rw, false);
}

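/*
 * Finish a completed clone: give the target's rq_end_io() (if any) a say,
 * then complete, leave alone, or requeue the original request accordingly.
 */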
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
		     !clone->q->limits.max_write_same_sectors))
		disable_write_same(tio->md);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;
	int rw;

	if (!clone) {
		rq_end_stats(tio->md, rq);
		rw = rq_data_dir(rq);
		if (!rq->q->mq_ops) {
			blk_end_request_all(rq, tio->error);
			rq_completed(tio->md, rw, false);
			free_old_rq_tio(tio);
		} else {
			blk_mq_end_request(rq, tio->error);
			rq_completed(tio->md, rw, false);
		}
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, int error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (!rq->q->mq_ops)
		blk_complete_request(rq);
	else
		blk_mq_complete_request(rq, error);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
 */
static void dm_kill_unmapped_request(struct request *rq, int error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (!clone->q->mq_ops) {
		/*
		 * This only cleans up the clone's bookkeeping in the queue
		 * it was dispatched to.
		 * The clone is *NOT* actually freed here because it was
		 * allocated from dm's own mempool (RQF_ALLOCED isn't set).
		 */
		__blk_put_request(clone->q, clone);
	}

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this clone's queue
	 */
	dm_complete_request(tio->orig, error);
}

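/*
 * Insert the clone into the underlying device's queue; on failure the
 * original request is completed with the error.
 */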
static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	int r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time = jiffies;
	r = blk_insert_cloned_request(clone->q, clone);
	if (r)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

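/*
 * Set up @clone as a clone of @rq: copy its bios via blk_rq_prep_clone(),
 * copy the command fields and wire up end_clone_request().
 */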
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->cmd = rq->cmd;
	clone->cmd_len = rq->cmd_len;
	clone->sense = rq->sense;
	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
				    struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	/*
	 * Create clone for use with .request_fn request_queue
	 */
	struct request *clone;

	clone = alloc_old_clone_request(md, gfp_mask);
	if (!clone)
		return NULL;

	blk_rq_init(NULL, clone);
	if (setup_clone(clone, rq, tio, gfp_mask)) {
		/* -ENOMEM */
		free_old_clone_request(md, clone);
		return NULL;
	}

	return clone;
}

static void map_tio_request(struct kthread_work *work);

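/* Initialize the per-request dm_rq_target_io state for original request @rq. */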
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
	if (md->kworker_task)
		kthread_init_work(&tio->work, map_tio_request);
}

static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
					       struct mapped_device *md,
					       gfp_t gfp_mask)
{
	struct dm_rq_target_io *tio;
	int srcu_idx;
	struct dm_table *table;

	tio = alloc_old_rq_tio(md, gfp_mask);
	if (!tio)
		return NULL;

	init_tio(tio, rq, md);

	table = dm_get_live_table(md, &srcu_idx);
	/*
	 * Must clone a request if this .request_fn DM device
	 * is stacked on .request_fn device(s).
	 */
	if (!dm_table_all_blk_mq_devices(table)) {
		if (!clone_old_rq(rq, md, tio, gfp_mask)) {
			dm_put_live_table(md, srcu_idx);
			free_old_rq_tio(tio);
			return NULL;
		}
	}
	dm_put_live_table(md, srcu_idx);

	return tio;
}

/*
 * Called with the queue lock held.
 */
static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct dm_rq_target_io *tio;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

	tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
	if (!tio)
		return BLKPREP_DEFER;

	rq->special = tio;
	rq->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;

	if (tio->clone) {
		clone = tio->clone;
		r = ti->type->map_rq(ti, clone, &tio->info);
		if (r == DM_MAPIO_DELAY_REQUEUE)
			return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
	} else {
		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
		if (r < 0) {
			/* The target wants to complete the I/O */
			dm_kill_unmapped_request(rq, r);
			return r;
		}
		if (r == DM_MAPIO_REMAPPED &&
		    setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}
	}

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		dm_dispatch_clone_request(clone, rq);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, r);
	}

	return r;
}

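/*
 * Mark the original request in-flight: start it at the block layer,
 * account it (including dm-stats, if enabled), and take an md reference
 * that is dropped in rq_completed().
 */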
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	if (!orig->q->mq_ops)
		blk_start_request(orig);
	else
		blk_mq_start_request(orig);
	atomic_inc(&md->pending[rq_data_dir(orig)]);

	if (md->seq_rq_merge_deadline_usecs) {
		md->last_rq_pos = rq_end_sector(orig);
		md->last_rq_rw = rq_data_dir(orig);
		md->last_rq_start_time = ktime_get();
	}

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count held by the device opener,
	 * because the device may be closed during request completion
	 * once all bios have completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

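/*
 * kthread_worker callback used by the .request_fn path: map one queued
 * request, requeueing it if the mapping asks for that.
 */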
static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

	if (map_request(tio) == DM_MAPIO_REQUEUE)
		dm_requeue_original_request(tio, false);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	unsigned deadline;

	if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
		return count;

	if (kstrtouint(buf, 10, &deadline))
		return -EINVAL;

	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

	md->seq_rq_merge_deadline_usecs = deadline;

	return count;
}

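/*
 * Return true while still within seq_rq_merge_deadline_usecs of the last
 * dispatched request; dm_old_request_fn() uses this to briefly delay
 * sequential I/O in the hope that it will merge.
 */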
static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
	ktime_t kt_deadline;

	if (!md->seq_rq_merge_deadline_usecs)
		return false;

	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

	return !ktime_after(ktime_get(), kt_deadline);
}

/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock, so that the number of
	 * in-flight I/Os isn't incremented after the queue is stopped
	 * in dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (req_op(rq) != REQ_OP_FLUSH)
			pos = blk_rq_pos(rq);

		if ((dm_old_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, 10);
			return;
		}

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		kthread_queue_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}
}

/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md)
{
	/* Fully initialize the queue */
	if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
		return -EINVAL;

	/* disable dm_old_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	dm_init_normal_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_old_prep_fn);

	/* Initialize the request-based DM worker thread */
	kthread_init_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
	if (IS_ERR(md->kworker_task))
		return PTR_ERR(md->kworker_task);

	elv_register_queue(md->queue);

	return 0;
}

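/*
 * blk-mq .init_request callback: stash md in the per-request tio (pdu) so
 * it is available in dm_mq_queue_rq().
 */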
static int dm_mq_init_request(void *data, struct request *rq,
			      unsigned int hctx_idx, unsigned int request_idx,
			      unsigned int numa_node)
{
	struct mapped_device *md = data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

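/*
 * blk-mq .queue_rq callback: start, map and dispatch one request;
 * returns BUSY so blk-mq retries later if the queue is stopping,
 * the target is busy, or mapping asks for a requeue.
 */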
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * On suspend dm_stop_queue() handles stopping the blk-mq
	 * request_queue BUT: even though the hw_queues are marked
	 * BLK_MQ_S_STOPPED at that point there is still a race that
	 * is allowing block/blk-mq.c to call ->queue_rq against a
	 * hctx that it really shouldn't. The following check guards
	 * against this rarity (albeit _not_ race-free).
	 */
	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return BLK_MQ_RQ_QUEUE_BUSY;

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_MQ_RQ_QUEUE_BUSY;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md, rq_data_dir(rq), false);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	if (!dm_table_all_blk_mq_devices(t)) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	dm_init_md_queue(md);

	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	blk_mq_register_dev(disk_to_dev(md->disk), q);

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}

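/* Free the blk-mq tag set allocated by dm_mq_init_request_queue(). */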
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");