/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

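/*
 * The queue thread: fetches requests off the block queue and hands them
 * to the driver's issue_fn. Two mmc_queue_req slots (cur/prev) are kept
 * and swapped on every iteration, so that issue_fn can prepare the next
 * transfer while the previous one is still in flight on the host.
 */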
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}

		/* Current request becomes previous request and vice versa. */
		mq->mqrq_prev->brq.mrq.data = NULL;
		mq->mqrq_prev->req = NULL;
		tmp = mq->mqrq_prev;
		mq->mqrq_prev = mq->mqrq_cur;
		mq->mqrq_cur = tmp;
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called by the block layer
 * whenever requests become runnable on this queue. If the queue is
 * being torn down (queuedata cleared), pending requests are failed
 * with -EIO; otherwise the queue thread is woken, unless it is
 * already busy processing a request.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

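/*
 * Tell the block layer what the card can discard: maximum size,
 * granularity, whether discarded data reads back as zeroes, and
 * whether secure discard is available.
 */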
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
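	/* pref_erase is in sectors, discard_granularity in bytes */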
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

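		/*
		 * Clamp the bounce buffer to what the host can take in a
		 * single request: total request size, segment size and
		 * block count all limit it.
		 */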
		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

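		/* Bouncing buys nothing for a single 512-byte sector */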
		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warning("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

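	/*
	 * When not bouncing, expose the host controller's real DMA and
	 * segment limits to the block layer.
	 */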
	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}
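
/*
 * A minimal sketch of the expected call sequence from the block driver
 * (names such as "md" are illustrative, not part of this file):
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *	if (ret)
 *		goto out;
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	md->queue.data = md;
 */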

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
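	/*
	 * Clearing queuedata makes mmc_request_fn() fail anything that
	 * is still pending with -EIO once the queue is restarted below.
	 */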
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
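
/*
 * A sketch of the intended pairing, assuming the block driver calls
 * these from its bus suspend/resume handlers:
 *
 *	mmc_queue_suspend(&md->queue);	(before the host sleeps)
 *	mmc_queue_resume(&md->queue);	(after the host wakes)
 *
 * Note that suspending takes thread_sem, which the queue thread holds
 * while it is processing a request, so suspend blocks until the
 * thread goes idle.
 */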

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	BUG_ON(!mqrq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

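	/*
	 * The host driver only ever sees a single segment covering the
	 * bounce buffer; buflen is the total length mapped above.
	 */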
	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}