/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);

bool dm_use_blk_mq_default(void)
{
        return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
        return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);

unsigned dm_get_reserved_rq_based_ios(void)
{
        return __dm_get_module_param(&reserved_rq_based_ios,
                                     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
        return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
        return __dm_get_module_param(&dm_mq_queue_depth,
                                     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
        return blk_queue_stackable(md->queue);
}

static void dm_old_start_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q))
                blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_start_queue(struct request_queue *q)
{
        blk_mq_start_stopped_hw_queues(q, true);
        blk_mq_kick_requeue_list(q);
}

void dm_start_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_start_queue(q);
        else
                dm_mq_start_queue(q);
}

static void dm_old_stop_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (!blk_queue_stopped(q))
                blk_stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_stop_queue(struct request_queue *q)
{
        if (blk_mq_queue_stopped(q))
                return;

        blk_mq_quiesce_queue(q);
}

void dm_stop_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_stop_queue(q);
        else
                dm_mq_stop_queue(q);
}

static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
                                                gfp_t gfp_mask)
{
        return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_old_rq_tio(struct dm_rq_target_io *tio)
{
        mempool_free(tio, tio->md->io_pool);
}

static struct request *alloc_old_clone_request(struct mapped_device *md,
                                               gfp_t gfp_mask)
{
        return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_old_clone_request(struct mapped_device *md, struct request *rq)
{
        mempool_free(rq, md->rq_pool);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
        struct dm_rq_clone_bio_info *info =
                container_of(clone, struct dm_rq_clone_bio_info, clone);
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
        int error = clone->bi_error;

        bio_put(clone);

        if (tio->error)
                /*
                 * An error has already been detected on the request.
                 * Once an error has occurred, just let clone->end_io()
                 * handle the remainder.
                 */
                return;
        else if (error) {
                /*
                 * Don't notify the upper layer of the error yet.
                 * The error handling decision is made by the target driver,
                 * when the request is completed.
                 */
                tio->error = error;
                return;
        }

        /*
         * I/O for the bio successfully completed.
         * Notify the upper layer of the data completion.
         */

        /*
         * bios are processed from the head of the list.
         * So the completing bio should always be rq->bio.
         * If it's not, something is wrong.
         */
        if (tio->orig->bio != bio)
                DMERR("bio completion is going in the middle of the request");

        /*
         * Update the original request.
         * Do not use blk_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
        blk_update_request(tio->orig, 0, nr_bytes);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
        return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies - tio->duration_jiffies;
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, true,
                                    tio->duration_jiffies, &tio->stats_aux);
        }
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
        struct request_queue *q = md->queue;
        unsigned long flags;

        atomic_dec(&md->pending[rw]);

        /* nudge anyone waiting on suspend queue */
        if (!md_in_flight(md))
                wake_up(&md->wait);

        /*
         * Run this off this callpath, as drivers could invoke end_io while
         * inside their request_fn (and holding the queue lock). Calling
         * back into ->request_fn() could deadlock attempting to grab the
         * queue lock again.
         */
        if (!q->mq_ops && run_queue) {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_run_queue_async(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }

        /*
         * dm_put() must be at the end of this function. See the comment above
         */
        dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;

        blk_rq_unprep_clone(clone);

        /*
         * It is possible for a clone_old_rq() allocated clone to
         * get passed in -- it may not yet have a request_queue.
         * This is known to occur if the error target replaces
         * a multipath target that has a request_fn queue stacked
         * on blk-mq queue(s).
         */
        if (clone->q && clone->q->mq_ops)
                /* stacked on blk-mq queue(s) */
                tio->ti->type->release_clone_rq(clone);
        else if (!md->queue->mq_ops)
                /* request_fn queue stacked on request_fn queue(s) */
                free_old_clone_request(md, clone);

        if (!md->queue->mq_ops)
                free_old_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;

        free_rq_clone(clone);
        rq_end_stats(md, rq);
        if (!rq->q->mq_ops)
                blk_end_request_all(rq, error);
        else
                blk_mq_end_request(rq, error);
        rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;

        if (!rq->q->mq_ops) {
                rq->special = NULL;
                rq->rq_flags &= ~RQF_DONTPREP;
        }

        if (clone)
                free_rq_clone(clone);
        else if (!tio->md->queue->mq_ops)
                free_old_rq_tio(tio);
}

/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
        blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
        blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
        __dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
        blk_mq_requeue_request(rq, false);
        __dm_mq_kick_requeue_list(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        int rw = rq_data_dir(rq);

        rq_end_stats(md, rq);
        dm_unprep_request(rq);

        if (!rq->q->mq_ops)
                dm_old_requeue_request(rq);
        else
                dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);

        rq_completed(md, rw, false);
}

static void dm_done(struct request *clone, int error, bool mapped)
{
        int r = error;
        struct dm_rq_target_io *tio = clone->end_io_data;
        dm_request_endio_fn rq_end_io = NULL;

        if (tio->ti) {
                rq_end_io = tio->ti->type->rq_end_io;

                if (mapped && rq_end_io)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }

        if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
                     !clone->q->limits.max_write_same_sectors))
                disable_write_same(tio->md);

        if (r <= 0)
                /* The target wants to complete the I/O */
                dm_end_request(clone, r);
        else if (r == DM_ENDIO_INCOMPLETE)
                /* The target will handle the I/O */
                return;
        else if (r == DM_ENDIO_REQUEUE)
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(tio, false);
        else {
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
        }
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
        bool mapped = true;
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;
        int rw;

        if (!clone) {
                rq_end_stats(tio->md, rq);
                rw = rq_data_dir(rq);
                if (!rq->q->mq_ops) {
                        blk_end_request_all(rq, tio->error);
                        rq_completed(tio->md, rw, false);
                        free_old_rq_tio(tio);
                } else {
                        blk_mq_end_request(rq, tio->error);
                        rq_completed(tio->md, rw, false);
                }
                return;
        }

        if (rq->rq_flags & RQF_FAILED)
                mapped = false;

        dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, int error)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);

        tio->error = error;
        if (!rq->q->mq_ops)
                blk_complete_request(rq);
        else
                blk_mq_complete_request(rq, error);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
 */
static void dm_kill_unmapped_request(struct request *rq, int error)
{
        rq->rq_flags |= RQF_FAILED;
        dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, int error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;

        if (!clone->q->mq_ops) {
                /*
                 * Just clean up the bookkeeping of the queue in which the
                 * clone was dispatched.
                 * The clone is *NOT* actually freed here because it was
                 * allocated from dm's own mempool (RQF_ALLOCED isn't set).
                 */
                __blk_put_request(clone->q, clone);
        }

        /*
         * Actual request completion is done in a softirq context which doesn't
         * hold the clone's queue lock. Otherwise, deadlock could occur because:
         * - another request may be submitted by the upper level driver
         *   of the stacking during the completion
         * - the submission which requires queue lock may be done
         *   against this clone's queue
         */
        dm_complete_request(tio->orig, error);
}

static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
        int r;

        if (blk_queue_io_stat(clone->q))
                clone->rq_flags |= RQF_IO_STAT;

        clone->start_time = jiffies;
        r = blk_insert_cloned_request(clone->q, clone);
        if (r)
                /* must complete clone in terms of original request */
                dm_complete_request(rq, r);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
{
        struct dm_rq_target_io *tio = data;
        struct dm_rq_clone_bio_info *info =
                container_of(bio, struct dm_rq_clone_bio_info, clone);

        info->orig = bio_orig;
        info->tio = tio;
        bio->bi_end_io = end_clone_bio;

        return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
                       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        int r;

        r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
                              dm_rq_bio_constructor, tio);
        if (r)
                return r;

        clone->end_io = end_clone_request;
        clone->end_io_data = tio;

        tio->clone = clone;

        return 0;
}

static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
                                    struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        /*
         * Create clone for use with .request_fn request_queue
         */
        struct request *clone;

        clone = alloc_old_clone_request(md, gfp_mask);
        if (!clone)
                return NULL;

        blk_rq_init(NULL, clone);
        if (setup_clone(clone, rq, tio, gfp_mask)) {
                /* -ENOMEM */
                free_old_clone_request(md, clone);
                return NULL;
        }

        return clone;
}

static void map_tio_request(struct kthread_work *work);

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
                     struct mapped_device *md)
{
        tio->md = md;
        tio->ti = NULL;
        tio->clone = NULL;
        tio->orig = rq;
        tio->error = 0;
        /*
         * Avoid initializing info for blk-mq; it passes
         * target-specific data through info.ptr
         * (see: dm_mq_init_request)
         */
        if (!md->init_tio_pdu)
                memset(&tio->info, 0, sizeof(tio->info));
        if (md->kworker_task)
                kthread_init_work(&tio->work, map_tio_request);
}

static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
                                               struct mapped_device *md,
                                               gfp_t gfp_mask)
{
        struct dm_rq_target_io *tio;
        int srcu_idx;
        struct dm_table *table;

        tio = alloc_old_rq_tio(md, gfp_mask);
        if (!tio)
                return NULL;

        init_tio(tio, rq, md);

        table = dm_get_live_table(md, &srcu_idx);
        /*
         * Must clone a request if this .request_fn DM device
         * is stacked on .request_fn device(s).
         */
        if (!dm_table_all_blk_mq_devices(table)) {
                if (!clone_old_rq(rq, md, tio, gfp_mask)) {
                        dm_put_live_table(md, srcu_idx);
                        free_old_rq_tio(tio);
                        return NULL;
                }
        }
        dm_put_live_table(md, srcu_idx);

        return tio;
}

/*
 * Called with the queue lock held.
 */
static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
{
        struct mapped_device *md = q->queuedata;
        struct dm_rq_target_io *tio;

        if (unlikely(rq->special)) {
                DMWARN("Already has something in rq->special.");
                return BLKPREP_KILL;
        }

        tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
        if (!tio)
                return BLKPREP_DEFER;

        rq->special = tio;
        rq->rq_flags |= RQF_DONTPREP;

        return BLKPREP_OK;
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
        int r;
        struct dm_target *ti = tio->ti;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        struct request *clone = NULL;

        if (tio->clone) {
                clone = tio->clone;
                r = ti->type->map_rq(ti, clone, &tio->info);
                if (r == DM_MAPIO_DELAY_REQUEUE)
                        return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
        } else {
                r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
                if (r < 0) {
                        /* The target wants to complete the I/O */
                        dm_kill_unmapped_request(rq, r);
                        return r;
                }
                if (r == DM_MAPIO_REMAPPED &&
                    setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
                        return DM_MAPIO_REQUEUE;
                }
        }

        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /* The target has taken the I/O to submit by itself later */
                break;
        case DM_MAPIO_REMAPPED:
                /* The target has remapped the I/O so dispatch it */
                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                                     blk_rq_pos(rq));
                dm_dispatch_clone_request(clone, rq);
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                break;
        case DM_MAPIO_DELAY_REQUEUE:
                /* The target wants to requeue the I/O after a delay */
                dm_requeue_original_request(tio, true);
                break;
        default:
                if (r > 0) {
                        DMWARN("unimplemented target map return value: %d", r);
                        BUG();
                }

                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(rq, r);
        }

        return r;
}

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
        if (!orig->q->mq_ops)
                blk_start_request(orig);
        else
                blk_mq_start_request(orig);
        atomic_inc(&md->pending[rq_data_dir(orig)]);

        if (md->seq_rq_merge_deadline_usecs) {
                md->last_rq_pos = rq_end_sector(orig);
                md->last_rq_rw = rq_data_dir(orig);
                md->last_rq_start_time = ktime_get();
        }

        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies;
                tio->n_sectors = blk_rq_sectors(orig);
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, false, 0,
                                    &tio->stats_aux);
        }

        /*
         * Hold the md reference here for the in-flight I/O.
         * We can't rely on the reference count held by the device opener,
         * because the device may be closed during the request completion
         * when all bios are completed.
         * See the comment in rq_completed() too.
         */
        dm_get(md);
}

static void map_tio_request(struct kthread_work *work)
{
        struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

        if (map_request(tio) == DM_MAPIO_REQUEUE)
                dm_requeue_original_request(tio, false);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
        return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
                                                     const char *buf, size_t count)
{
        unsigned deadline;

        if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
                return count;

        if (kstrtouint(buf, 10, &deadline))
                return -EINVAL;

        if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
                deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

        md->seq_rq_merge_deadline_usecs = deadline;

        return count;
}

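/*
 * Usage sketch for the merge-deadline knob above (assuming the usual dm
 * sysfs wiring of these show/store helpers in dm-sysfs.c): the deadline is
 * tuned per device, in microseconds, e.g.
 *
 *   echo 100 > /sys/block/dm-0/dm/rq_based_seq_io_merge_deadline
 *
 * A value of 0 (the default) disables the heuristic; values are capped at
 * MAX_SEQ_RQ_MERGE_DEADLINE_USECS.
 */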
static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
        ktime_t kt_deadline;

        if (!md->seq_rq_merge_deadline_usecs)
                return false;

        kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
        kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

        return !ktime_after(ktime_get(), kt_deadline);
}

/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_target *ti = md->immutable_target;
        struct request *rq;
        struct dm_rq_target_io *tio;
        sector_t pos = 0;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, pos);
                dm_put_live_table(md, srcu_idx);
        }

        /*
         * For suspend, check blk_queue_stopped() and increment ->pending
         * within a single queue_lock, so that we do not increment the
         * number of in-flight I/Os after the queue is stopped in
         * dm_suspend().
         */
        while (!blk_queue_stopped(q)) {
                rq = blk_peek_request(q);
                if (!rq)
                        return;

                /* always use block 0 to find the target for flushes for now */
                pos = 0;
                if (req_op(rq) != REQ_OP_FLUSH)
                        pos = blk_rq_pos(rq);

                if ((dm_old_request_peeked_before_merge_deadline(md) &&
                     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
                     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
                    (ti->type->busy && ti->type->busy(ti))) {
                        blk_delay_queue(q, 10);
                        return;
                }

                dm_start_request(md, rq);

                tio = tio_from_request(rq);
                /* Establish tio->ti before queuing work (map_tio_request) */
                tio->ti = ti;
                kthread_queue_work(&md->kworker, &tio->work);
                BUG_ON(!irqs_disabled());
        }
}

/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md)
{
        /* Fully initialize the queue */
        md->queue->request_fn = dm_old_request_fn;
        if (blk_init_allocated_queue(md->queue) < 0)
                return -EINVAL;

        /* disable dm_old_request_fn's merge heuristic by default */
        md->seq_rq_merge_deadline_usecs = 0;

        dm_init_normal_md_queue(md);
        blk_queue_softirq_done(md->queue, dm_softirq_done);
        blk_queue_prep_rq(md->queue, dm_old_prep_fn);

        /* Initialize the request-based DM worker thread */
        kthread_init_worker(&md->kworker);
        md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
                                       "kdmwork-%s", dm_device_name(md));
        if (IS_ERR(md->kworker_task)) {
                int error = PTR_ERR(md->kworker_task);
                md->kworker_task = NULL;
                return error;
        }

        elv_register_queue(md->queue);

        return 0;
}

static int dm_mq_init_request(void *data, struct request *rq,
                              unsigned int hctx_idx, unsigned int request_idx,
                              unsigned int numa_node)
{
        struct mapped_device *md = data;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

        /*
         * Must initialize md member of tio, otherwise it won't
         * be available in dm_mq_queue_rq.
         */
        tio->md = md;

        if (md->init_tio_pdu) {
                /* target-specific per-io data is immediately after the tio */
                tio->info.ptr = tio + 1;
        }

        return 0;
}

static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
        struct mapped_device *md = tio->md;
        struct dm_target *ti = md->immutable_target;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, 0);
                dm_put_live_table(md, srcu_idx);
        }

        if (ti->type->busy && ti->type->busy(ti))
                return BLK_MQ_RQ_QUEUE_BUSY;

        dm_start_request(md, rq);

        /* Init tio using md established in .init_request */
        init_tio(tio, rq, md);

        /*
         * Establish tio->ti before calling map_request().
         */
        tio->ti = ti;

        /* Direct call is fine since .queue_rq allows allocations */
        if (map_request(tio) == DM_MAPIO_REQUEUE) {
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops dm_mq_ops = {
        .queue_rq = dm_mq_queue_rq,
        .complete = dm_softirq_done,
        .init_request = dm_mq_init_request,
};

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct request_queue *q;
        struct dm_target *immutable_tgt;
        int err;

        if (!dm_table_all_blk_mq_devices(t)) {
                DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
                return -EINVAL;
        }

        md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
        if (!md->tag_set)
                return -ENOMEM;

        md->tag_set->ops = &dm_mq_ops;
        md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
        md->tag_set->numa_node = md->numa_node_id;
        md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
        md->tag_set->driver_data = md;

        md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data is immediately after the tio */
                md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }

        err = blk_mq_alloc_tag_set(md->tag_set);
        if (err)
                goto out_kfree_tag_set;

        q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_tag_set;
        }
        dm_init_md_queue(md);

        /* backfill 'mq' sysfs registration normally done in blk_register_queue */
        blk_mq_register_dev(disk_to_dev(md->disk), q);

        return 0;

out_tag_set:
        blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
        kfree(md->tag_set);

        return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
        if (md->tag_set) {
                blk_mq_free_tag_set(md->tag_set);
                kfree(md->tag_set);
        }
}

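/*
 * A usage sketch for the knobs below (assuming this file is built into the
 * dm_mod module, as in the upstream drivers/md/Makefile): they can be set
 * on the kernel command line, e.g.
 *
 *   dm_mod.use_blk_mq=Y dm_mod.dm_mq_nr_hw_queues=4
 *
 * at load time via "modprobe dm_mod dm_mq_queue_depth=2048", or at runtime
 * through /sys/module/dm_mod/parameters/.
 */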
module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");