/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"

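/*
 * Upper bound for the bounce buffer used when the host controller can
 * only handle a single scatter/gather segment: 64 KiB, further capped
 * below by the host's max_req_size, max_seg_size and max_blk_count.
 */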
#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests, discards and secure erases.
	 */
	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
	    req_op(req) != REQ_OP_SECURE_ERASE) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

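	/*
	 * PF_MEMALLOC lets this thread dip into memory reserves: writeback
	 * under memory pressure may depend on mmcqd making progress, so it
	 * must not itself block on allocation.
	 */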
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			bool req_is_special = mmc_req_is_special(req);

			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * The current request becomes the previous request
			 * and vice versa. Special requests have already
			 * been completed, so do not carry one over as the
			 * previous request.
			 */
			if (req_is_special)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
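			/*
			 * Drop thread_sem while the queue is idle so that
			 * mmc_queue_suspend() can take it and know that no
			 * request is in flight.
			 */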
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

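	/*
	 * The queue is being torn down: mmc_cleanup_queue() has cleared
	 * queuedata, so fail any late requests quietly.
	 */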
	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * A new request has arrived while the thread may be
		 * blocked waiting for the previous request to complete,
		 * with no current request fetched.
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

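	/*
	 * If the device has a usable DMA mask, raise the bounce limit to
	 * the highest page the device can address; otherwise keep the
	 * conservative BLK_BOUNCE_HIGH default.
	 */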
	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

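	/*
	 * Hosts that can only handle a single sg segment get a contiguous
	 * bounce buffer instead, so requests of up to bouncesz bytes can
	 * still be issued in one go; the data is copied in
	 * mmc_queue_bounce_pre()/mmc_queue_bounce_post().
	 */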
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			} else {
				mqrq_prev->bounce_buf =
					kmalloc(bouncesz, GFP_KERNEL);
				if (!mqrq_prev->bounce_buf) {
					pr_warn("%s: unable to allocate bounce prev buffer\n",
						mmc_card_name(card));
					kfree(mqrq_cur->bounce_buf);
					mqrq_cur->bounce_buf = NULL;
				}
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;

	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}

void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			sg_unmark_end(__sg++);
			sg_len++;
		} while (remain);
	}

	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		sg_unmark_end(__sg++);
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

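	/*
	 * When bouncing, the host driver sees a single sg entry that
	 * covers the whole bounce buffer.
	 */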
	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}
Pierre Ossman98ccf142007-05-12 00:26:16 +0200557}