/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);

bool dm_use_blk_mq_default(void)
{
        return use_blk_mq;
}

bool dm_use_blk_mq(struct mapped_device *md)
{
        return md->use_blk_mq;
}
EXPORT_SYMBOL_GPL(dm_use_blk_mq);

unsigned dm_get_reserved_rq_based_ios(void)
{
        return __dm_get_module_param(&reserved_rq_based_ios,
                                     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
        return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
        return __dm_get_module_param(&dm_mq_queue_depth,
                                     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
        return blk_queue_stackable(md->queue);
}

static void dm_old_start_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (blk_queue_stopped(q))
                blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_start_queue(struct request_queue *q)
{
        blk_mq_start_stopped_hw_queues(q, true);
        blk_mq_kick_requeue_list(q);
}

void dm_start_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_start_queue(q);
        else
                dm_mq_start_queue(q);
}

static void dm_old_stop_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (!blk_queue_stopped(q))
                blk_stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_stop_queue(struct request_queue *q)
{
        if (blk_mq_queue_stopped(q))
                return;

        blk_mq_quiesce_queue(q);
}

void dm_stop_queue(struct request_queue *q)
{
        if (!q->mq_ops)
                dm_old_stop_queue(q);
        else
                dm_mq_stop_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
        struct dm_rq_clone_bio_info *info =
                container_of(clone, struct dm_rq_clone_bio_info, clone);
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
        int error = clone->bi_error;

        bio_put(clone);

        if (tio->error)
                /*
                 * An error has already been detected on the request.
                 * Once an error occurs, just let clone->end_io() handle
                 * the remainder.
                 */
                return;
        else if (error) {
                /*
                 * Don't report the error to the upper layer yet.
                 * The error handling decision is made by the target driver
                 * when the request is completed.
                 */
                tio->error = error;
                return;
        }

        /*
         * I/O for the bio successfully completed.
         * Report the data completion to the upper layer.
         */

        /*
         * bios are processed from the head of the list.
         * So the completing bio should always be rq->bio.
         * If it's not, something is wrong.
         */
        if (tio->orig->bio != bio)
                DMERR("bio completion is going in the middle of the request");

        /*
         * Update the original request.
         * Do not use blk_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
        blk_update_request(tio->orig, 0, nr_bytes);
}

static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
        return blk_mq_rq_to_pdu(rq);
}

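/* Account completion of the original request in dm-stats, if enabled. */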
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies - tio->duration_jiffies;
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, true,
                                    tio->duration_jiffies, &tio->stats_aux);
        }
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
        struct request_queue *q = md->queue;
        unsigned long flags;

        atomic_dec(&md->pending[rw]);

        /* nudge anyone waiting on suspend queue */
        if (!md_in_flight(md))
                wake_up(&md->wait);

        /*
         * Run this off this callpath, as drivers could invoke end_io while
         * inside their request_fn (and holding the queue lock). Calling
         * back into ->request_fn() could deadlock attempting to grab the
         * queue lock again.
         */
        if (!q->mq_ops && run_queue) {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_run_queue_async(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }

        /*
         * dm_put() must be at the end of this function. See the comment above.
         */
        dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;

        blk_rq_unprep_clone(clone);
        tio->ti->type->release_clone_rq(clone);

        rq_end_stats(md, rq);
        if (!rq->q->mq_ops)
                blk_end_request_all(rq, error);
        else
                blk_mq_end_request(rq, error);
        rq_completed(md, rw, true);
}

/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
        blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
        blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
        __dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
        blk_mq_requeue_request(rq, false);
        __dm_mq_kick_requeue_list(rq->q, msecs);
}

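/*
 * Release any clone and requeue the original request, optionally after
 * a delay on the blk-mq path.
 */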
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        int rw = rq_data_dir(rq);

        rq_end_stats(md, rq);
        if (tio->clone) {
                blk_rq_unprep_clone(tio->clone);
                tio->ti->type->release_clone_rq(tio->clone);
        }

        if (!rq->q->mq_ops)
                dm_old_requeue_request(rq);
        else
                dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0);

        rq_completed(md, rw, false);
}

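/*
 * Act on the completion of a clone: give the target's rq_end_io hook a
 * chance to override the result, then complete, requeue or leave the
 * original request pending accordingly.
 */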
static void dm_done(struct request *clone, int error, bool mapped)
{
        int r = error;
        struct dm_rq_target_io *tio = clone->end_io_data;
        dm_request_endio_fn rq_end_io = NULL;

        if (tio->ti) {
                rq_end_io = tio->ti->type->rq_end_io;

                if (mapped && rq_end_io)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }

        if (unlikely(r == -EREMOTEIO)) {
                if (req_op(clone) == REQ_OP_WRITE_SAME &&
                    !clone->q->limits.max_write_same_sectors)
                        disable_write_same(tio->md);
                if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
                    !clone->q->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(tio->md);
        }

        if (r <= 0)
                /* The target wants to complete the I/O */
                dm_end_request(clone, r);
        else if (r == DM_ENDIO_INCOMPLETE)
                /* The target will handle the I/O */
                return;
        else if (r == DM_ENDIO_REQUEUE)
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(tio, false);
        else {
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
        }
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
        bool mapped = true;
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;
        int rw;

        if (!clone) {
                struct mapped_device *md = tio->md;

                rq_end_stats(md, rq);
                rw = rq_data_dir(rq);
                if (!rq->q->mq_ops)
                        blk_end_request_all(rq, tio->error);
                else
                        blk_mq_end_request(rq, tio->error);
                rq_completed(md, rw, false);
                return;
        }

        if (rq->rq_flags & RQF_FAILED)
                mapped = false;

        dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, int error)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);

        tio->error = error;
        if (!rq->q->mq_ops)
                blk_complete_request(rq);
        else
                blk_mq_complete_request(rq, error);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
 */
static void dm_kill_unmapped_request(struct request *rq, int error)
{
        rq->rq_flags |= RQF_FAILED;
        dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, int error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;

        /*
         * Actual request completion is done in a softirq context which doesn't
         * hold the clone's queue lock. Otherwise, deadlock could occur because:
         *     - another request may be submitted by the upper level driver
         *       of the stacking during the completion
         *     - the submission which requires queue lock may be done
         *       against this clone's queue
         */
        dm_complete_request(tio->orig, error);
}

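/*
 * Hand the prepared clone to the underlying device's queue; if insertion
 * fails, complete the original request with the error.
 */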
static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
        int r;

        if (blk_queue_io_stat(clone->q))
                clone->rq_flags |= RQF_IO_STAT;

        clone->start_time = jiffies;
        r = blk_insert_cloned_request(clone->q, clone);
        if (r)
                /* must complete clone in terms of original request */
                dm_complete_request(rq, r);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
{
        struct dm_rq_target_io *tio = data;
        struct dm_rq_clone_bio_info *info =
                container_of(bio, struct dm_rq_clone_bio_info, clone);

        info->orig = bio_orig;
        info->tio = tio;
        bio->bi_end_io = end_clone_bio;

        return 0;
}

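/*
 * Prepare the clone: copy the original request's bios, then wire up the
 * clone's completion callbacks and link it to its tio.
 */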
static int setup_clone(struct request *clone, struct request *rq,
                       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        int r;

        r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
                              dm_rq_bio_constructor, tio);
        if (r)
                return r;

        clone->end_io = end_clone_request;
        clone->end_io_data = tio;

        tio->clone = clone;

        return 0;
}

static void map_tio_request(struct kthread_work *work);

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
                     struct mapped_device *md)
{
        tio->md = md;
        tio->ti = NULL;
        tio->clone = NULL;
        tio->orig = rq;
        tio->error = 0;
        /*
         * Avoid initializing info for blk-mq; it passes
         * target-specific data through info.ptr
         * (see: dm_mq_init_request)
         */
        if (!md->init_tio_pdu)
                memset(&tio->info, 0, sizeof(tio->info));
        if (md->kworker_task)
                kthread_init_work(&tio->work, map_tio_request);
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
        int r;
        struct dm_target *ti = tio->ti;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        struct request *clone = NULL;

        r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /* The target has taken the I/O to submit by itself later */
                break;
        case DM_MAPIO_REMAPPED:
                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
                        return DM_MAPIO_REQUEUE;
                }

                /* The target has remapped the I/O so dispatch it */
                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                                     blk_rq_pos(rq));
                dm_dispatch_clone_request(clone, rq);
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                break;
        case DM_MAPIO_DELAY_REQUEUE:
                /* The target wants to requeue the I/O after a delay */
                dm_requeue_original_request(tio, true);
                break;
        default:
                if (r > 0) {
                        DMWARN("unimplemented target map return value: %d", r);
                        BUG();
                }

                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(rq, r);
        }

        return r;
}

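/*
 * Mark the original request as started and account it towards dm-stats
 * and the in-flight counters.
 */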
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
        if (!orig->q->mq_ops)
                blk_start_request(orig);
        else
                blk_mq_start_request(orig);
        atomic_inc(&md->pending[rq_data_dir(orig)]);

        if (md->seq_rq_merge_deadline_usecs) {
                md->last_rq_pos = rq_end_sector(orig);
                md->last_rq_rw = rq_data_dir(orig);
                md->last_rq_start_time = ktime_get();
        }

        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies;
                tio->n_sectors = blk_rq_sectors(orig);
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, false, 0,
                                    &tio->stats_aux);
        }

        /*
         * Hold the md reference here for the in-flight I/O.
         * We can't rely on the reference count by device opener,
         * because the device may be closed during the request completion
         * when all bios are completed.
         * See the comment in rq_completed() too.
         */
        dm_get(md);
}

static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
{
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

        /*
         * Must initialize md member of tio, otherwise it won't
         * be available in dm_mq_queue_rq.
         */
        tio->md = md;

        if (md->init_tio_pdu) {
                /* target-specific per-io data is immediately after the tio */
                tio->info.ptr = tio + 1;
        }

        return 0;
}

static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
        return __dm_rq_init_rq(q->rq_alloc_data, rq);
}

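/*
 * kthread worker function used by the .request_fn path: map the original
 * request outside the queue lock and requeue it on DM_MAPIO_REQUEUE.
 */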
static void map_tio_request(struct kthread_work *work)
{
        struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);

        if (map_request(tio) == DM_MAPIO_REQUEUE)
                dm_requeue_original_request(tio, false);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
        return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
                                                     const char *buf, size_t count)
{
        unsigned deadline;

        if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
                return count;

        if (kstrtouint(buf, 10, &deadline))
                return -EINVAL;

        if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
                deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

        md->seq_rq_merge_deadline_usecs = deadline;

        return count;
}

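/*
 * Return true if we are still within the seq_rq_merge_deadline window
 * opened by the most recently started request.
 */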
static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
{
        ktime_t kt_deadline;

        if (!md->seq_rq_merge_deadline_usecs)
                return false;

        kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
        kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

        return !ktime_after(ktime_get(), kt_deadline);
}

/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_target *ti = md->immutable_target;
        struct request *rq;
        struct dm_rq_target_io *tio;
        sector_t pos = 0;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                if (unlikely(!map)) {
                        dm_put_live_table(md, srcu_idx);
                        return;
                }
                ti = dm_table_find_target(map, pos);
                dm_put_live_table(md, srcu_idx);
        }

        /*
         * For suspend, check blk_queue_stopped() and increment
         * ->pending within a single queue_lock not to increment the
         * number of in-flight I/Os after the queue is stopped in
         * dm_suspend().
         */
        while (!blk_queue_stopped(q)) {
                rq = blk_peek_request(q);
                if (!rq)
                        return;

                /* always use block 0 to find the target for flushes for now */
                pos = 0;
                if (req_op(rq) != REQ_OP_FLUSH)
                        pos = blk_rq_pos(rq);

                if ((dm_old_request_peeked_before_merge_deadline(md) &&
                     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
                     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
                    (ti->type->busy && ti->type->busy(ti))) {
                        blk_delay_queue(q, 10);
                        return;
                }

                dm_start_request(md, rq);

                tio = tio_from_request(rq);
                init_tio(tio, rq, md);
                /* Establish tio->ti before queuing work (map_tio_request) */
                tio->ti = ti;
                kthread_queue_work(&md->kworker, &tio->work);
                BUG_ON(!irqs_disabled());
        }
}

/*
 * Fully initialize a .request_fn request-based queue.
 */
int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct dm_target *immutable_tgt;

        /* Fully initialize the queue */
        md->queue->cmd_size = sizeof(struct dm_rq_target_io);
        md->queue->rq_alloc_data = md;
        md->queue->request_fn = dm_old_request_fn;
        md->queue->init_rq_fn = dm_rq_init_rq;

        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data is immediately after the tio */
                md->queue->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }
        if (blk_init_allocated_queue(md->queue) < 0)
                return -EINVAL;

        /* disable dm_old_request_fn's merge heuristic by default */
        md->seq_rq_merge_deadline_usecs = 0;

        dm_init_normal_md_queue(md);
        blk_queue_softirq_done(md->queue, dm_softirq_done);

        /* Initialize the request-based DM worker thread */
        kthread_init_worker(&md->kworker);
        md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
                                       "kdmwork-%s", dm_device_name(md));
        if (IS_ERR(md->kworker_task)) {
                int error = PTR_ERR(md->kworker_task);
                md->kworker_task = NULL;
                return error;
        }

        elv_register_queue(md->queue);

        return 0;
}

static int dm_mq_init_request(void *data, struct request *rq,
                              unsigned int hctx_idx, unsigned int request_idx,
                              unsigned int numa_node)
{
        return __dm_rq_init_rq(data, rq);
}

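/*
 * blk-mq .queue_rq handler: resolve the target, start the request and map
 * it directly; on DM_MAPIO_REQUEUE the start is undone and BUSY is returned
 * so blk-mq retries after a short delay.
 */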
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
        struct mapped_device *md = tio->md;
        struct dm_target *ti = md->immutable_target;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, 0);
                dm_put_live_table(md, srcu_idx);
        }

        if (ti->type->busy && ti->type->busy(ti))
                return BLK_MQ_RQ_QUEUE_BUSY;

        dm_start_request(md, rq);

        /* Init tio using md established in .init_request */
        init_tio(tio, rq, md);

        /*
         * Establish tio->ti before calling map_request().
         */
        tio->ti = ti;

        /* Direct call is fine since .queue_rq allows allocations */
        if (map_request(tio) == DM_MAPIO_REQUEUE) {
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
                blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }

        return BLK_MQ_RQ_QUEUE_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
        .queue_rq = dm_mq_queue_rq,
        .complete = dm_softirq_done,
        .init_request = dm_mq_init_request,
};

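/*
 * Allocate a blk-mq tag set and initialize a blk-mq request queue for a
 * request-based dm-mq device.
 */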
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct request_queue *q;
        struct dm_target *immutable_tgt;
        int err;

        if (!dm_table_all_blk_mq_devices(t)) {
                DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
                return -EINVAL;
        }

        md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
        if (!md->tag_set)
                return -ENOMEM;

        md->tag_set->ops = &dm_mq_ops;
        md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
        md->tag_set->numa_node = md->numa_node_id;
        md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
        md->tag_set->driver_data = md;

        md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data is immediately after the tio */
                md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }

        err = blk_mq_alloc_tag_set(md->tag_set);
        if (err)
                goto out_kfree_tag_set;

        q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_tag_set;
        }
        dm_init_md_queue(md);

        /* backfill 'mq' sysfs registration normally done in blk_register_queue */
        blk_mq_register_dev(disk_to_dev(md->disk), q);

        return 0;

out_tag_set:
        blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
        kfree(md->tag_set);

        return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
        if (md->tag_set) {
                blk_mq_free_tag_set(md->tag_set);
                kfree(md->tag_set);
        }
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");