/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"
#define MMC_QUEUE_BOUNCESZ 65536

/*
 * The default number of write requests that triggers write packing was
 * determined by benchmark tests, so as to keep read latency as low as
 * possible while maintaining high write throughput.
 */
#define DEFAULT_NUM_REQS_TO_START_PACK 17

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

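/*
 * mmc_queue_thread() - worker thread for an MMC queue. It fetches requests
 * from the block layer and hands them to issue_fn(), swapping the current
 * and previous request slots so a new request can be prepared while the
 * previous one is still in flight, and sleeps when both slots are empty.
 */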
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct mmc_queue_req *tmp;
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				continue; /* fetch again */
			} else if ((mq->flags & MMC_QUEUE_URGENT_REQUEST) &&
				   (mq->mqrq_cur->req &&
				    !(mq->mqrq_cur->req->cmd_flags &
				      REQ_URGENT))) {
				/*
				 * Clean the current request when urgent
				 * request processing is in progress and the
				 * current request is not urgent (all existing
				 * requests were completed or reinserted into
				 * the block layer).
				 */
				mq->mqrq_cur->brq.mrq.data = NULL;
				mq->mqrq_cur->req = NULL;
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 */
			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			mmc_start_delayed_bkops(card);
			mq->card->host->context_info.is_urgent = false;
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * A new MMC request arrived while the MMC thread may be
		 * blocked waiting for the previous request to complete,
		 * with no current request fetched.
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

/*
 * mmc_urgent_request() - Urgent MMC request handler.
 * @q: request queue.
 *
 * This is called when the block layer has an urgent request to deliver.
 * If the mmc context is waiting for the current request to complete, it
 * is woken up; the current request may be interrupted and reinserted
 * into the block device request queue. The next fetched request should
 * then be the urgent one; the block I/O scheduler ensures this.
 */
static void mmc_urgent_request(struct request_queue *q)
{
	unsigned long flags;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_context_info *cntx;

	if (!mq) {
		mmc_request(q);
		return;
	}
	cntx = &mq->card->host->context_info;

	/* critical section with mmc_wait_data_done() */
	spin_lock_irqsave(&cntx->lock, flags);

	/* do the stop flow only when the mmc thread is waiting for done */
	if (mq->mqrq_cur->req || mq->mqrq_prev->req) {
		/*
		 * An urgent request must be executed alone,
		 * so disable write packing.
		 */
		mmc_blk_disable_wr_packing(mq);
		cntx->is_urgent = true;
		spin_unlock_irqrestore(&cntx->lock, flags);
		wake_up_interruptible(&cntx->wait);
	} else {
		spin_unlock_irqrestore(&cntx->lock, flags);
		mmc_request(q);
	}
}

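/*
 * Allocate and initialise a scatterlist of sg_len entries; on allocation
 * failure *err is set to -ENOMEM and NULL is returned.
 */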
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

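/*
 * Configure the block queue's discard parameters from the card's erase
 * capabilities.
 */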
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

static void mmc_queue_setup_sanitize(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_SANITIZE, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	if ((host->caps2 & MMC_CAP2_STOP_REQUEST) &&
	    host->ops->stop_request &&
	    mq->card->ext_csd.hpi)
		blk_urgent_request(mq->queue, mmc_urgent_request);

	/* Start from a clean slate for both request slots */
	memset(mqrq_cur, 0, sizeof(*mqrq_cur));
	memset(mqrq_prev, 0, sizeof(*mqrq_prev));

	INIT_LIST_HEAD(&mqrq_cur->packed_list);
	INIT_LIST_HEAD(&mqrq_prev->packed_list);

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;
	mq->num_wr_reqs_to_start_packing =
		min_t(int, (int)card->ext_csd.max_packed_writes,
		      DEFAULT_NUM_REQS_TO_START_PACK);

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (mmc_can_sanitize(card) && (host->caps2 & MMC_CAP2_SANITIZE))
		mmc_queue_setup_sanitize(mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warning("%s: unable to allocate bounce cur buffer\n",
					   mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to allocate bounce prev buffer\n",
					   mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

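	/*
	 * Without bounce buffers, map requests directly and honour the
	 * host controller's segment count and size limits.
	 */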
	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
				 host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
int mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	int rc = 0;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		rc = down_trylock(&mq->thread_sem);
		if (rc) {
			/*
			 * Failed to take the lock, so abort the suspend:
			 * the mmcqd thread is still processing requests.
			 */
			mq->flags &= ~MMC_QUEUE_SUSPENDED;
			spin_lock_irqsave(q->queue_lock, flags);
			blk_start_queue(q);
			spin_unlock_irqrestore(q->queue_lock, flags);
			rc = -EBUSY;
		}
	}
	return rc;
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

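/*
 * Map a packed request to a scatterlist: for a packed WRITE the first sg
 * entry carries the packed command header, followed by the data of each
 * request on the packed list.
 */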
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_queue_req *mqrq,
					    struct scatterlist *sg)
{
	struct scatterlist *__sg;
	unsigned int sg_len = 0;
	struct request *req;
	enum mmc_packed_cmd cmd;

	cmd = mqrq->packed_cmd;

	if (cmd == MMC_PACKED_WRITE) {
		__sg = sg;
		sg_set_buf(__sg, mqrq->packed_cmd_hdr,
			   sizeof(mqrq->packed_cmd_hdr));
		sg_len++;
		/* clear the end marker so the list can be extended */
		__sg->page_link &= ~0x02;
	}

	__sg = sg + sg_len;
	list_for_each_entry(req, &mqrq->packed_list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		/* clear the end marker set by blk_rq_map_sg() */
		(__sg++)->page_link &= ~0x02;
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf) {
		if (!list_empty(&mqrq->packed_list))
			return mmc_queue_packed_map_sg(mq, mqrq, mqrq->sg);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (!list_empty(&mqrq->packed_list))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq, mqrq->bounce_sg);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			  mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			    mqrq->bounce_buf, mqrq->sg[0].length);
}