/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

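	/* Kill requests that race with removal of the card. */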
	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

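/*
 * Per-queue worker thread.  It sleeps until mmc_request() wakes it, then
 * pulls requests off the block queue and hands them to issue_fn until the
 * queue is empty again.  thread_sem is dropped around schedule() so that
 * mmc_queue_suspend() can synchronise with an idle thread.
 */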
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct request *req;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		req = NULL;	/* Must be set to NULL at each iteration */

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);
		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->mqrq_cur->req)
		wake_up_process(mq->thread);
}

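/*
 * Allocate and initialise a scatterlist of sg_len entries; on failure
 * *err is set to -ENOMEM and NULL is returned.
 */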
struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

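/*
 * Advertise the card's discard (erase/trim) capabilities to the block
 * layer: maximum discard size, discard granularity and, where the card
 * supports it, secure discard.
 */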
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0)
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
	mq->mqrq_cur = mqrq_cur;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

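	/*
	 * Hosts that can only handle a single segment benefit from a
	 * contiguous bounce buffer: requests are copied through it so
	 * they can be issued as one transfer.
	 */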
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
		}

		if (mqrq_cur->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

		}
	}
#endif

	if (!mqrq_cur->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

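/**
 * mmc_cleanup_queue - tear down an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Resume the queue if it was suspended, stop the worker thread, drain
 * any remaining requests and free the scatterlists and bounce buffer.
 */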
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	BUG_ON(!mqrq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

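	/*
	 * Collapse the request into a single sg entry covering the bounce
	 * buffer; the data itself is copied in mmc_queue_bounce_pre/post.
	 */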
	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}