/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

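/*
 * Clear CQE busy flags whose conditions have passed: the DCMD slot is
 * freed once no DCMD is in flight, and the queue-full flag is always
 * cleared so that dispatch is retried.
 */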
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

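/*
 * Classify a request for a CQE host: driver-private, discard and
 * secure-erase requests are issued synchronously, a flush becomes a
 * DCMD when the host supports it, and everything else (reads and
 * writes) goes through the CQE asynchronously.
 */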
static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

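/*
 * Determine how a request will be issued. Non-CQE hosts issue only
 * reads and writes asynchronously; all other requests are synchronous.
 */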
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

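/*
 * Schedule recovery work at most once until it completes. Called with
 * the queue lock held, which protects mq->recovery_needed.
 */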
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

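/*
 * Block layer timeout handling for CQE requests. For DCMD and async
 * requests, ask the CQE driver whether the request is still in flight:
 * if so, optionally kick recovery and restart the timer; if not, the
 * request has already finished and is completed here. Synchronous
 * requests are left to the mmc core's own timeout handling.
 */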
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				__mmc_cqe_recovery_notifier(mq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has already completed; there is no timeout to handle */
		blk_mq_complete_request(req);
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(q->queue_lock, flags);

	if (mq->recovery_needed || !mq->use_cqe)
		ret = BLK_EH_RESET_TIMER;
	else
		ret = mmc_cqe_timed_out(req);

	spin_unlock_irqrestore(q->queue_lock, flags);

	return ret;
}

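/*
 * Recovery work: claim the host, run the CQE or non-CQE recovery path,
 * clear the recovery flag under the queue lock, then re-run the
 * hardware queues so that requests held back during recovery are
 * dispatched again.
 */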
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (mq->use_cqe)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(q->queue_lock);
	mq->recovery_needed = false;
	spin_unlock_irq(q->queue_lock);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

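/*
 * Advertise discard support sized from the card's maximum discard
 * capability, with the card's preferred erase size as the granularity.
 * Secure erase is advertised only when the card supports it.
 */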
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}

/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the mmc queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}

/*
 * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
 * will not be dispatched in parallel.
 */
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	int ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(q->queue_lock);

	if (mq->recovery_needed) {
		spin_unlock_irq(q->queue_lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(q->queue_lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(q->queue_lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(q->queue_lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		spin_unlock_irq(q->queue_lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq = mmc_mq_queue_rq,
	.init_request = mmc_mq_init_request,
	.exit_request = mmc_mq_exit_request,
	.complete = mmc_blk_mq_complete,
	.timeout = mmc_mq_timed_out,
};

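/*
 * Apply the host controller's limits (bounce limit, maximum request
 * size, segment count and segment size) to the block queue, and set up
 * the recovery and completion work items.
 */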
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}

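/*
 * Allocate a blk-mq tag set with a single hardware queue and create the
 * request queue from it. BLK_MQ_F_BLOCKING lets ->queue_rq() sleep,
 * e.g. when claiming the host in mmc_get_card().
 */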
static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
			     const struct blk_mq_ops *mq_ops, spinlock_t *lock)
{
	int ret;

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = mq_ops;
	mq->tag_set.queue_depth = q_depth;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
			    BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	mq->queue->queue_lock = lock;
	mq->queue->queuedata = mq;

	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);

	return ret;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
		       spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	int q_depth;
	int ret;

	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (mq->use_cqe)
		q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		q_depth = MMC_QUEUE_DEPTH;

	ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
	if (ret)
		return ret;

	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);

	return 0;
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;

	mq->card = card;

	mq->use_cqe = host->cqe_enabled;

	return mmc_mq_init(mq, card, lock);
}

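/*
 * Stop new requests from being dispatched, then wait for any already
 * in flight to finish.
 */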
void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_cleanup_queue(q);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}