/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);

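/*
 * Report whether newly created request-based devices should default to
 * blk-mq, as chosen by CONFIG_DM_MQ_DEFAULT or the use_blk_mq module
 * parameter (see module_param() at the bottom of this file).
 */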
bool dm_use_blk_mq_default(void)
{
	return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

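/*
 * Clamp the user-configurable dm-mq module parameters to sane bounds
 * before they are used to size the blk-mq tag set.
 */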
static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

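/*
 * Request-based DM marks its queue stackable, so that flag doubles as
 * the request-based test here.
 */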
int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}

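/*
 * Restart a legacy (.request_fn) queue that was stopped, e.g. during
 * suspend; queue_lock must be taken since blk_start_queue() requires it.
 */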
static void dm_old_start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

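/*
 * Restart the blk-mq path: wake any stopped hardware queues and kick
 * the requeue list so previously requeued requests get dispatched.
 */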
static void dm_mq_start_queue(struct request_queue *q)
{
	blk_mq_start_stopped_hw_queues(q, true);
	blk_mq_kick_requeue_list(q);
}

void dm_start_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_start_queue(q);
	else
		dm_mq_start_queue(q);
}

static void dm_old_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (!blk_queue_stopped(q))
		blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

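/*
 * Quiesce the blk-mq queue: once this returns, no .queue_rq call is in
 * flight and no new ones will start until the queue is restarted.
 */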
static void dm_mq_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_quiesce_queue(q);
}

void dm_stop_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_stop_queue(q);
	else
		dm_mq_stop_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	int error = clone->bi_error;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The decision on how to handle the error is made by the
		 * target driver when the request completes.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list, so the
	 * completing bio should always be rq->bio.  If it's not,
	 * something is wrong.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

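/*
 * The dm_rq_target_io is allocated as the request payload for both the
 * legacy and blk-mq paths, so it can always be fetched from the pdu.
 */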
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

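/* Account a completed request in dm-stats, if statistics are enabled. */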
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Alternatively, take a reference with dm_get() before calling this
 * function and drop it with dm_put() afterwards.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	struct request_queue *q = md->queue;
	unsigned long flags;

	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (!q->mq_ops && run_queue) {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_run_queue_async(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	/*
	 * dm_put() must be at the end of this function.  See the comment above.
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone);

	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}

/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	blk_run_queue_async(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

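/*
 * Exported so that targets (e.g. dm-multipath) can kick the requeue
 * list once requeued requests can make progress again.
 */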
void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

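/*
 * Release any clone resources and send the original request back to
 * the block layer for another dispatch, optionally after a delay.
 */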
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	int rw = rq_data_dir(rq);

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone);
	}

	if (!rq->q->mq_ops)
		dm_old_requeue_request(rq);
	else
		dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);

	rq_completed(md, rw, false);
}

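/*
 * Finish a completed clone: give the target's rq_end_io() a chance to
 * override the result, then complete, requeue or leave the original
 * request as directed.
 */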
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(r == -EREMOTEIO && (req_op(clone) == REQ_OP_WRITE_SAME) &&
		     !clone->q->limits.max_write_same_sectors))
		disable_write_same(tio->md);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;
	int rw;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		rw = rq_data_dir(rq);
		if (!rq->q->mq_ops)
			blk_end_request_all(rq, tio->error);
		else
			blk_mq_end_request(rq, tio->error);
		rq_completed(md, rw, false);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, int error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (!rq->q->mq_ops)
		blk_complete_request(rq);
	else
		blk_mq_complete_request(rq, error);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * The target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, int error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
	 *     - during completion, another request may be submitted by the
	 *       upper-level driver of the stack
	 *     - that submission, which requires the queue lock, may be made
	 *       against this clone's queue
	 */
	dm_complete_request(tio->orig, error);
}

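/*
 * Send the prepared clone to the underlying device's queue; if the
 * insert fails, complete the original request with the error.
 */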
static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	int r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time = jiffies;
	r = blk_insert_cloned_request(clone->q, clone);
	if (r)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
}

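/*
 * blk_rq_prep_clone() callback: remember the original bio and tio and
 * route each cloned bio's completion through end_clone_bio().
 */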
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

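/*
 * Populate the clone with bios copied from the original request and
 * hook up its completion handling.
 */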
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

static void map_tio_request(struct kthread_work *work);

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
	if (md->kworker_task)
		kthread_init_work(&tio->work, map_tio_request);
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		dm_dispatch_clone_request(clone, rq);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, r);
	}

	return r;
}

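/*
 * Mark the original request as in flight: start it at the block layer,
 * record merge-deadline and statistics state, and pin the md.
 */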
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	if (!orig->q->mq_ops)
		blk_start_request(orig);
	else
		blk_mq_start_request(orig);
	atomic_inc(&md->pending[rq_data_dir(orig)]);

	if (md->seq_rq_merge_deadline_usecs) {
		md->last_rq_pos = rq_end_sector(orig);
		md->last_rq_rw = rq_data_dir(orig);
		md->last_rq_start_time = ktime_get();
	}

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count held by the device opener,
	 * because the device may be closed during request completion, once
	 * all bios have finished.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

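/*
 * Shared request-constructor for the legacy .init_rq_fn and blk-mq
 * .init_request paths; the tio lives in the request payload.
 */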
static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
{
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
	return __dm_rq_init_rq(q->rq_alloc_data, rq);
}

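/*
 * kthread worker function for the legacy path: map one request outside
 * the queue lock, requeueing it if the target asks for that.
 */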
static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

	if (map_request(tio) == DM_MAPIO_REQUEUE)
		dm_requeue_original_request(tio, false);
}

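/*
 * Sysfs interface for the rq_based_seq_io_merge_deadline attribute,
 * which bounds how long dm_old_request_fn() may hold back a sequential
 * request in the hope of merging it (0 disables the heuristic).
 */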
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	unsigned deadline;

	if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
		return count;

	if (kstrtouint(buf, 10, &deadline))
		return -EINVAL;

	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

	md->seq_rq_merge_deadline_usecs = deadline;

	return count;
}

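/*
 * Return true if the most recent request was started within the merge
 * deadline, i.e. dispatch should briefly wait for possible merges.
 */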
static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
	ktime_t kt_deadline;

	if (!md->seq_rq_merge_deadline_usecs)
		return false;

	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

	return !ktime_after(ktime_get(), kt_deadline);
}

/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return;
		}
		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending under the same queue_lock so that the number of
	 * in-flight I/Os isn't incremented after the queue is stopped
	 * in dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (req_op(rq) != REQ_OP_FLUSH)
			pos = blk_rq_pos(rq);

		if ((dm_old_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, 10);
			return;
		}

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		init_tio(tio, rq, md);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		kthread_queue_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}
}

/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;

	/* Fully initialize the queue */
	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
	md->queue->rq_alloc_data = md;
	md->queue->request_fn = dm_old_request_fn;
	md->queue->init_rq_fn = dm_rq_init_rq;

	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->queue->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}
	if (blk_init_allocated_queue(md->queue) < 0)
		return -EINVAL;

	/* disable dm_old_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	dm_init_normal_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);

	/* Initialize the request-based DM worker thread */
	kthread_init_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
	if (IS_ERR(md->kworker_task)) {
		int error = PTR_ERR(md->kworker_task);
		md->kworker_task = NULL;
		return error;
	}

	elv_register_queue(md->queue);

	return 0;
}

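/* blk-mq .init_request callback; the tag set's driver_data is the md. */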
static int dm_mq_init_request(void *data, struct request *rq,
			      unsigned int hctx_idx, unsigned int request_idx,
			      unsigned int numa_node)
{
	return __dm_rq_init_rq(data, rq);
}

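/*
 * blk-mq .queue_rq: resolve the target, mark the request started and
 * map it immediately; returns BLK_MQ_RQ_QUEUE_BUSY to back off when
 * the target is busy or the map must be requeued.
 */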
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_MQ_RQ_QUEUE_BUSY;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md, rq_data_dir(rq), false);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

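/*
 * Fully initialize a blk-mq request-based queue (dm-mq).
 */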
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	if (!dm_table_all_blk_mq_devices(t)) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	dm_init_md_queue(md);

	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	blk_mq_register_dev(disk_to_dev(md->disk), q);

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}

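/* Free the tag set allocated by dm_mq_init_request_queue(). */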
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");