/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"

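/*
 * Upper bound for the bounce buffer used when the host controller can
 * handle only a single segment (see mmc_init_queue() below).  64 KiB is
 * a heuristic trade-off, not a hardware requirement: large enough to
 * amortise per-request overhead, small enough to remain a cheap and
 * reliable kmalloc().
 */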
#define MMC_QUEUE_BOUNCESZ 65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}

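/*
 * Worker thread that issues requests to the host.  Roughly: two
 * mmc_queue_req slots (mqrq_cur and mqrq_prev) are swapped on every
 * loop iteration, so the next request can be prepared while the
 * previous one is still in flight; the is_waiting_last_req/is_new_req
 * handshake with mmc_request_fn() lets the thread learn about newly
 * queued requests while it waits for the last one to finish.
 */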
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

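	/*
	 * Writes queued here may themselves be needed to free memory
	 * (e.g. during swap-out), so allow the thread to dip into the
	 * emergency memory reserves rather than deadlock in reclaim.
	 */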
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->mqrq_prev->req)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			bool req_is_special = mmc_req_is_special(req);

			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
			if (mq->new_request) {
				mq->new_request = false;
				continue; /* fetch again */
			}

			/*
			 * Current request becomes previous request
			 * and vice versa.
			 * In case of special requests, current request
			 * has been finished. Do not assign it to previous
			 * request.
			 */
			if (req_is_special)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
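/*
 * Note: as a legacy request_fn this is entered with q->queue_lock held
 * and interrupts disabled, so it must not sleep; it only sets flags and
 * wakes the worker thread.
 */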
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

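/*
 * Advertise the card's erase/discard capabilities to the block layer.
 * card->pref_erase is in 512-byte sectors, hence the << 9 below when
 * converting it to a discard granularity in bytes.
 */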
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

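/*
 * Bounce buffers: hosts that can only DMA a single contiguous segment
 * stage each request through one contiguous buffer per mmc_queue_req
 * (copied in mmc_queue_bounce_pre()/_post() below), trading a memcpy
 * for the ability to issue larger transfers.
 */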
#ifdef CONFIG_MMC_BLOCK_BOUNCE
static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
					unsigned int bouncesz)
{
	int i;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->mqrq[i].bounce_buf)
			goto out_err;
	}

	return true;

out_err:
	while (--i >= 0) {
		kfree(mq->mqrq[i].bounce_buf);
		mq->mqrq[i].bounce_buf = NULL;
	}
	pr_warn("%s: unable to allocate bounce buffers\n",
		mmc_card_name(mq->card));
	return false;
}

static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
				      unsigned int bouncesz)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
		if (ret)
			return ret;

		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
		if (ret)
			return ret;
	}

	return 0;
}
#endif

static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
{
	int i, ret;

	for (i = 0; i < mq->qdepth; i++) {
		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
		if (ret)
			return ret;
	}

	return 0;
}

static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
{
	kfree(mqrq->bounce_sg);
	mqrq->bounce_sg = NULL;

	kfree(mqrq->sg);
	mqrq->sg = NULL;

	kfree(mqrq->bounce_buf);
	mqrq->bounce_buf = NULL;
}

static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
{
	int i;

	for (i = 0; i < mq->qdepth; i++)
		mmc_queue_req_free_bufs(&mq->mqrq[i]);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	bool bounce = false;
	int ret = -ENOMEM;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->qdepth = 2;
	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
			   GFP_KERNEL);
	if (!mq->mqrq)
		goto blk_cleanup;
	mq->mqrq_cur = &mq->mqrq[0];
	mq->mqrq_prev = &mq->mqrq[1];
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

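	/*
	 * If the host can handle only one segment, try to use a bounce
	 * buffer.  bouncesz is clamped to what the controller can move
	 * in a single request: e.g. a host with max_blk_count == 8 would
	 * shrink it from 64 KiB to 8 * 512 = 4 KiB.
	 */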
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512 &&
		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto cleanup_queue;
			bounce = true;
		}
	}
#endif

	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

 cleanup_queue:
	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;
blk_cleanup:
	blk_cleanup_queue(mq->queue);
	return ret;
}

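/*
 * A minimal usage sketch: the MMC block driver calls roughly
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *
 * with md->lock protecting the queue, and tears it down again with
 * mmc_cleanup_queue() below.
 */
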
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	mmc_queue_reqs_free_bufs(mq);
	kfree(mq->mqrq);
	mq->mqrq = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

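/*
 * Suspend/resume are built on thread_sem: mmc_queue_suspend() takes the
 * semaphore and holds it, which parks mmc_queue_thread() at its down()
 * once the current request completes; mmc_queue_resume() releases it.
 */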
/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended = true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mqrq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);

	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

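	/*
	 * The whole request is staged through the bounce buffer, so the
	 * host sees a single sg entry covering it.
	 */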
	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}