/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ 65536

/*
 * The default number of write requests that triggers write packing was
 * determined from benchmark tests: it keeps read latency as low as
 * possible while still maintaining high write throughput.
 */
#define DEFAULT_NUM_REQS_TO_START_PACK 17

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

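	/* Mark the request prepared so prep is not run again if it is requeued */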
	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;

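	/*
	 * Allow access to memory reserves: this thread writes out dirty
	 * pages for the block device, so blocking on an allocation here
	 * could deadlock memory reclaim.
	 */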
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct mmc_queue_req *tmp;
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			mq->issue_fn(mq, req);
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				continue; /* fetch again */
			} else if ((mq->flags & MMC_QUEUE_URGENT_REQUEST) &&
				   (mq->mqrq_cur->req &&
				    !(mq->mqrq_cur->req->cmd_flags & REQ_URGENT))) {
				/*
				 * Clean the current request when urgent
				 * request processing is in progress and the
				 * current request is not urgent (all existing
				 * requests have completed or been reinserted
				 * into the block layer).
				 */
				mq->mqrq_cur->brq.mrq.data = NULL;
				mq->mqrq_cur->req = NULL;
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 */
			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			mmc_start_delayed_bkops(card);
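			/* Queue is idle: clear any stale urgent flag before sleeping */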
			mq->card->host->context_info.is_urgent = false;
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * A new MMC request arrived while the MMC thread may be
		 * blocked waiting for the previous request to complete,
		 * with no current request fetched.
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

/*
 * mmc_urgent_request() - Urgent MMC request handler.
 * @q: request queue.
 *
 * Called when the block layer has an urgent request to deliver. If the
 * MMC context is waiting for the current request to complete, it is
 * woken up; the current request may be interrupted and re-inserted into
 * the block device request queue. The block I/O scheduler ensures that
 * the next request fetched is the urgent one.
 */
static void mmc_urgent_request(struct request_queue *q)
{
	unsigned long flags;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_context_info *cntx;

	if (!mq) {
		mmc_request(q);
		return;
	}
	cntx = &mq->card->host->context_info;

	/* critical section with mmc_wait_data_done() */
	spin_lock_irqsave(&cntx->lock, flags);

	/* run the stop flow only when the mmc thread is waiting for completion */
	if (mq->mqrq_cur->req || mq->mqrq_prev->req) {
		/*
		 * An urgent request must be executed on its own,
		 * so disable write packing.
		 */
		mmc_blk_disable_wr_packing(mq);
		cntx->is_urgent = true;
		spin_unlock_irqrestore(&cntx->lock, flags);
		wake_up_interruptible(&cntx->wait);
	} else {
		spin_unlock_irqrestore(&cntx->lock, flags);
		mmc_request(q);
	}
}

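/*
 * Allocate and initialise a scatterlist of sg_len entries. On failure,
 * *err is set to -ENOMEM and NULL is returned.
 */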
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
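	/* pref_erase is in 512-byte sectors; shifting left by 9 converts to bytes */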
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

static void mmc_queue_setup_sanitize(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_SANITIZE, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	if ((host->caps2 & MMC_CAP2_STOP_REQUEST) &&
	    host->ops->stop_request &&
	    mq->card->ext_csd.hpi)
		blk_urgent_request(mq->queue, mmc_urgent_request);

	/* Zero the request slots themselves, not the slot pointers */
	memset(mqrq_cur, 0, sizeof(*mqrq_cur));
	memset(mqrq_prev, 0, sizeof(*mqrq_prev));

	INIT_LIST_HEAD(&mqrq_cur->packed_list);
	INIT_LIST_HEAD(&mqrq_prev->packed_list);

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;
	mq->num_wr_reqs_to_start_packing =
		min_t(int, (int)card->ext_csd.max_packed_writes,
		      DEFAULT_NUM_REQS_TO_START_PACK);

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (mmc_can_sanitize(card) && (host->caps2 & MMC_CAP2_SANITIZE))
		mmc_queue_setup_sanitize(mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

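		/*
		 * Clamp the bounce buffer to what the host can actually
		 * transfer in one request: the maximum request size, the
		 * maximum segment size, and max_blk_count 512-byte blocks.
		 */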
		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warning("%s: unable to allocate bounce cur buffer\n",
					   mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warning("%s: unable to allocate bounce prev buffer\n",
					   mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
				 host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
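 *
 * Returns 0 on success, or -EBUSY if the queue thread was busy
 * processing requests and the suspend had to be aborted.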
 */
int mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	int rc = 0;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		rc = down_trylock(&mq->thread_sem);
		if (rc) {
			/*
			 * Failed to take the lock, so abort the suspend:
			 * the mmcqd thread is still processing requests.
			 */
			mq->flags &= ~MMC_QUEUE_SUSPENDED;
			spin_lock_irqsave(q->queue_lock, flags);
			blk_start_queue(q);
			spin_unlock_irqrestore(q->queue_lock, flags);
			rc = -EBUSY;
		}
	}
	return rc;
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

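/*
 * Map a packed command and all requests on its packed list into a single
 * scatterlist. For a packed WRITE, the first sg entry carries the packed
 * command header, followed by the data of every queued request. Returns
 * the number of sg entries used.
 */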
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_queue_req *mqrq,
					    struct scatterlist *sg)
{
	struct scatterlist *__sg;
	unsigned int sg_len = 0;
	struct request *req;
	enum mmc_packed_cmd cmd;

	cmd = mqrq->packed_cmd;

	if (cmd == MMC_PACKED_WRITE) {
		__sg = sg;
		sg_set_buf(__sg, mqrq->packed_cmd_hdr,
			   sizeof(mqrq->packed_cmd_hdr));
		sg_len++;
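		/*
		 * Clear the termination bit (0x02) in page_link, here and
		 * after each mapped request below, so the header and all
		 * data segments chain into one contiguous sg list.
		 */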
		__sg->page_link &= ~0x02;
	}

	__sg = sg + sg_len;
	list_for_each_entry(req, &mqrq->packed_list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		(__sg++)->page_link &= ~0x02;
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf) {
		if (!list_empty(&mqrq->packed_list))
			return mmc_queue_packed_map_sg(mq, mqrq, mqrq->sg);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (!list_empty(&mqrq->packed_list))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq, mqrq->bounce_sg);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

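	/*
	 * Collapse the bounce sg list into a single sg entry covering the
	 * whole bounce buffer; the data itself is copied to and from the
	 * buffer in mmc_queue_bounce_pre() and mmc_queue_bounce_post().
	 */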
	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			  mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
			    mqrq->bounce_buf, mqrq->sg[0].length);
}