/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ 65536

/*
 * The default number of write requests needed to trigger write packing
 * was determined by benchmarking: it keeps read latency as low as
 * possible while still sustaining high write throughput.
 */
#define DEFAULT_NUM_REQS_TO_START_PACK 17
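
/*
 * For example (see mmc_init_queue() below): a card whose
 * ext_csd.max_packed_writes is 8 starts packing after min(8, 17) = 8
 * consecutive write requests; a card reporting 64 uses the default 17.
 */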

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
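
/*
 * Worker thread for a queue. The thread keeps two mmc_queue_req slots,
 * "current" and "previous", and swaps them after each issued request so
 * that a new request can be fetched and prepared while the previous one
 * is still completing on the host (asynchronous request handling).
 */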
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct request *req;
	struct mmc_card *card = mq->card;
	struct mmc_async_event_stats *stats;
	struct mmc_queue_req *tmp;

	if (!card)
		return 0;

	stats = &card->async_event_stats;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		req = NULL;	/* Must be set to NULL at each iteration */

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		if (!req && mq->mqrq_prev->req &&
		    !(mq->mqrq_prev->req->cmd_flags & REQ_SANITIZE) &&
		    !(mq->mqrq_prev->req->cmd_flags & REQ_FLUSH) &&
		    !(mq->mqrq_prev->req->cmd_flags & REQ_DISCARD)) {
			card->host->context_info.is_waiting_last_req = true;
			if (stats && stats->enabled)
				stats->null_fetched++;
		}
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				if (stats && stats->enabled)
					stats->fetch_due_to_new_req++;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 */
			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			mmc_start_delayed_bkops(card);
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct mmc_async_event_stats *stats;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->card)
		return;

	cntx = &mq->card->host->context_info;
	stats = &mq->card->async_event_stats;

	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * A new request arrived while the queue thread may be
		 * blocked waiting for the previous request to complete,
		 * with no current request fetched.
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			if (stats->enabled)
				stats->wakeup_new++;
			if (cntx->is_new_req && stats->enabled)
				stats->new_req_when_new_marked++;
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		} else if (stats->enabled)
			stats->q_no_waiting++;
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) {
		wake_up_process(mq->thread);
		if (stats->enabled)
			stats->wakeup_mq_thread++;
	} else if (stats->enabled)
		stats->no_mmc_request_action++;
}
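
/*
 * Allocate and initialise a scatterlist of @sg_len entries; on failure
 * NULL is returned and *err is set to -ENOMEM, otherwise *err is 0.
 */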
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg) {
		*err = -ENOMEM;
	} else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

static void mmc_queue_setup_sanitize(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_SANITIZE, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	/* Zero the request slots themselves, not the mq pointers to them */
	memset(mqrq_cur, 0, sizeof(*mqrq_cur));
	memset(mqrq_prev, 0, sizeof(*mqrq_prev));

	INIT_LIST_HEAD(&mqrq_cur->packed_list);
	INIT_LIST_HEAD(&mqrq_prev->packed_list);

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;
	mq->num_wr_reqs_to_start_packing =
		min_t(int, (int)card->ext_csd.max_packed_writes,
		      DEFAULT_NUM_REQS_TO_START_PACK);

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (mmc_can_sanitize(card) && (host->caps2 & MMC_CAP2_SANITIZE))
		mmc_queue_setup_sanitize(mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf)
				pr_warning("%s: unable to allocate bounce cur buffer\n",
					   mmc_card_name(card));
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to allocate bounce prev buffer\n",
					   mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
				 host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}
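
/*
 * Illustrative caller-side sketch (an assumption, not code from this
 * file): the MMC block driver typically allocates a per-device struct
 * (say 'md') holding an mmc_queue, and pairs init with cleanup:
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *	if (ret)
 *		return ret;
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	...
 *	mmc_cleanup_queue(&md->queue);
 */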

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
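
/*
 * Illustrative pairing (a sketch, not a caller in this file): bus
 * suspend/resume paths typically bracket host power transitions with
 * these helpers:
 *
 *	mmc_queue_suspend(mq);
 *	... suspend the host controller ...
 *	mmc_queue_resume(mq);
 */

/*
 * Map a packed command: for a packed WRITE the first sg entry carries
 * the packed command header, followed by the sg entries of each request
 * on the packed list.
 */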
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_queue_req *mqrq,
					    struct scatterlist *sg)
{
	struct scatterlist *__sg;
	unsigned int sg_len = 0;
	struct request *req;
	enum mmc_packed_cmd cmd;

	cmd = mqrq->packed_cmd;

	if (cmd == MMC_PACKED_WRITE) {
		__sg = sg;
		sg_set_buf(__sg, mqrq->packed_cmd_hdr,
			   sizeof(mqrq->packed_cmd_hdr));
		sg_len++;
		__sg->page_link &= ~0x02;
	}

	__sg = sg + sg_len;
	list_for_each_entry(req, &mqrq->packed_list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		(__sg++)->page_link &= ~0x02;
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf) {
		if (!list_empty(&mqrq->packed_list))
			return mmc_queue_packed_map_sg(mq, mqrq, mqrq->sg);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (!list_empty(&mqrq->packed_list))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq, mqrq->bounce_sg);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			  mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			    mqrq->bounce_buf, mqrq->sg[0].length);
}
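
/*
 * Illustrative issue-path usage (a sketch; the real caller is the MMC
 * block driver): map the request, bounce data out before it runs, and
 * bounce data back in after it completes:
 *
 *	sg_len = mmc_queue_map_sg(mq, mqrq);
 *	mmc_queue_bounce_pre(mqrq);
 *	... start the request on the host and wait for completion ...
 *	mmc_queue_bounce_post(mqrq);
 */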