/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"

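/* Default size of a bounce buffer: 64 KiB (clamped to host limits below) */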
#define MMC_QUEUE_BOUNCESZ 65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests, discards and secure erases.
	 */
	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
	    req_op(req) != REQ_OP_SECURE_ERASE) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

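/*
 * Worker thread that fetches requests off the block queue and hands
 * them to the MMC block driver. The two request slots (mqrq_cur and
 * mqrq_prev) are swapped on each iteration so that a new request can
 * be prepared while the previous one is still in flight.
 */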
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->mqrq_prev->req)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			bool req_is_special = mmc_req_is_special(req);

			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (req_is_special)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

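/*
 * Allocate and initialise a scatterlist with @sg_len entries. On
 * failure NULL is returned and *err is set to -ENOMEM.
 */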
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

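/*
 * Derive the block queue's discard limits from the card's erase
 * capabilities.
 */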
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

#ifdef CONFIG_MMC_BLOCK_BOUNCE
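/*
 * Allocate a bounce buffer for each of the two request slots. Returns
 * true on success; on failure any partial allocation is freed and
 * false is returned.
 */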
static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
					unsigned int bouncesz)
{
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
	if (!mqrq_cur->bounce_buf) {
		pr_warn("%s: unable to allocate bounce cur buffer\n",
			mmc_card_name(mq->card));
		return false;
	}

	mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
	if (!mqrq_prev->bounce_buf) {
		pr_warn("%s: unable to allocate bounce prev buffer\n",
			mmc_card_name(mq->card));
		kfree(mqrq_cur->bounce_buf);
		mqrq_cur->bounce_buf = NULL;
		return false;
	}

	return true;
}

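/*
 * Allocate the scatterlists used with the bounce buffers: a single
 * entry for the bounce buffer itself and bouncesz / 512 entries for
 * mapping the request.
 */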
static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
				      unsigned int bouncesz)
{
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
	int ret;

	mqrq_cur->sg = mmc_alloc_sg(1, &ret);
	if (ret)
		return ret;

	mqrq_cur->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
	if (ret)
		return ret;

	mqrq_prev->sg = mmc_alloc_sg(1, &ret);
	if (ret)
		return ret;

	mqrq_prev->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);

	return ret;
}
#endif

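/*
 * A minimal usage sketch (hypothetical caller names; in the kernel the
 * MMC block driver is the real caller):
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *	if (ret)
 *		goto err_free;
 */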
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	bool bounce = false;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512 &&
		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto free_bounce_sg;
			bounce = true;
		}
	}
#endif

	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

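/**
 * mmc_cleanup_queue - tear down an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Resume the queue if it was suspended, stop the worker thread, fail
 * any remaining requests, and free the per-slot scatterlists and
 * bounce buffers.
 */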
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

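/*
 * Bounce buffer helpers. For hosts limited to a single segment, the
 * block driver maps a request with mmc_queue_map_sg(), copies write
 * data into the bounce buffer with mmc_queue_bounce_pre() before the
 * request is issued, and copies read data back out with
 * mmc_queue_bounce_post() once it completes.
 */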
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	BUG_ON(!mqrq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

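	/* Collapse the mapping into one sg entry over the bounce buffer */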
	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}