/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/sched/rt.h>

#include "queue.h"
#include "block.h"

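/*
 * Size of the bounce buffer used when the host controller supports only
 * a single segment per request (64 KiB here, capped further below by the
 * host's own request, segment and block-count limits).
 */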
#define MMC_QUEUE_BOUNCESZ 65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;

        /*
         * Only normal filesystem requests, discards and secure erases
         * are allowed; everything else is rejected.
         */
        if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
            req_op(req) != REQ_OP_SECURE_ERASE) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }

        if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
                return BLKPREP_KILL;

        req->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}

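/*
 * Worker thread that feeds requests to the MMC block driver. It keeps two
 * request slots (mqrq_cur/mqrq_prev) so that the next request can be
 * prepared while the previous one is still in flight.
 */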
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;
        struct sched_param scheduler_params = {0};

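        /*
         * Run the queue thread as a realtime SCHED_FIFO task, presumably
         * to keep storage latency low even under CPU load.
         */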
        scheduler_params.sched_priority = 1;

        sched_setscheduler(current, SCHED_FIFO, &scheduler_params);

        current->flags |= PF_MEMALLOC;

        down(&mq->thread_sem);
        do {
                struct request *req = NULL;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->mqrq_cur->req = req;
                spin_unlock_irq(q->queue_lock);

                if (req || mq->mqrq_prev->req) {
                        bool req_is_special = mmc_req_is_special(req);

                        set_current_state(TASK_RUNNING);
                        mmc_blk_issue_rq(mq, req);
                        cond_resched();
                        if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
                                mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
                                continue; /* fetch again */
                        }

                        /*
                         * The current request becomes the previous request
                         * and vice versa. Special requests have already
                         * been finished at this point, so do not carry
                         * them over as the previous request.
                         */
                        if (req_is_special)
                                mq->mqrq_cur->req = NULL;

                        mq->mqrq_prev->brq.mrq.data = NULL;
                        mq->mqrq_prev->req = NULL;
                        swap(mq->mqrq_prev, mq->mqrq_cur);
                } else {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                }
        } while (1);
        up(&mq->thread_sem);

        return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
        unsigned long flags;
        struct mmc_context_info *cntx;

        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->cmd_flags |= REQ_QUIET;
                        __blk_end_request_all(req, -EIO);
                }
                return;
        }

        cntx = &mq->card->host->context_info;
        if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
                /*
                 * A new request arrived while the queue thread may be
                 * blocked waiting for the previous request to complete,
                 * with no current request fetched.
                 */
                spin_lock_irqsave(&cntx->lock, flags);
                if (cntx->is_waiting_last_req) {
                        cntx->is_new_req = true;
                        wake_up_interruptible(&cntx->wait);
                }
                spin_unlock_irqrestore(&cntx->lock, flags);
        } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
                wake_up_process(mq->thread);
}

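/*
 * Allocate and initialise a scatterlist of sg_len entries; on failure
 * *err is set to -ENOMEM and NULL is returned.
 */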
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
        struct scatterlist *sg;

        sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
        if (!sg)
                *err = -ENOMEM;
        else {
                *err = 0;
                sg_init_table(sg, sg_len);
        }

        return sg;
}

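/*
 * Advertise the card's discard/erase capabilities to the block layer:
 * maximum discard size, granularity (derived from the card's preferred
 * erase size) and, where supported, secure erase.
 */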
static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        blk_queue_max_discard_sectors(q, max_discard);
        if (card->erased_byte == 0 && !mmc_can_discard(card))
                q->limits.discard_zeroes_data = 1;
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;
        if (mmc_can_secure_erase_trim(card))
                queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach to this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret;
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
        if (!mq->queue)
                return -ENOMEM;

        mq->mqrq_cur = mqrq_cur;
        mq->mqrq_prev = mqrq_prev;
        mq->queue->queuedata = mq;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

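        /*
         * Hosts limited to a single segment per request get a contiguous
         * bounce buffer instead, so that scattered requests can still be
         * handed over as one transfer.
         */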
#ifdef CONFIG_MMC_BLOCK_BOUNCE
        if (host->max_segs == 1) {
                unsigned int bouncesz;

                bouncesz = MMC_QUEUE_BOUNCESZ;

                if (bouncesz > host->max_req_size)
                        bouncesz = host->max_req_size;
                if (bouncesz > host->max_seg_size)
                        bouncesz = host->max_seg_size;
                if (bouncesz > (host->max_blk_count * 512))
                        bouncesz = host->max_blk_count * 512;

                if (bouncesz > 512) {
                        mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                        if (!mqrq_cur->bounce_buf) {
                                pr_warn("%s: unable to allocate bounce cur buffer\n",
                                        mmc_card_name(card));
                        } else {
                                mqrq_prev->bounce_buf =
                                        kmalloc(bouncesz, GFP_KERNEL);
                                if (!mqrq_prev->bounce_buf) {
                                        pr_warn("%s: unable to allocate bounce prev buffer\n",
                                                mmc_card_name(card));
                                        kfree(mqrq_cur->bounce_buf);
                                        mqrq_cur->bounce_buf = NULL;
                                }
                        }
                }

                if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);

                        mqrq_cur->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_cur->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;
                }
        }
#endif

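        /*
         * Without bounce buffers, expose the host's real limits and DMA
         * addressing capability to the block layer.
         */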
        if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);

                mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;

                mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                host->index, subname ? subname : "");

        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto free_bounce_sg;
        }

        return 0;
 free_bounce_sg:
        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;
        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;
        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;
        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        blk_cleanup_queue(mq->queue);
        return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;
        struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        /* Empty the queue */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;

        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;

        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;

        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

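/*
 * Allocate the packed command state (header and request list) for both
 * request slots, used for packed commands on cards that support them.
 */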
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
        int ret = 0;

        mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
        if (!mqrq_cur->packed) {
                pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
                        mmc_card_name(card));
                ret = -ENOMEM;
                goto out;
        }

        mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
        if (!mqrq_prev->packed) {
                pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
                        mmc_card_name(card));
                kfree(mqrq_cur->packed);
                mqrq_cur->packed = NULL;
                ret = -ENOMEM;
                goto out;
        }

        INIT_LIST_HEAD(&mqrq_cur->packed->list);
        INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
        return ret;
}

void mmc_packed_clean(struct mmc_queue *mq)
{
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        kfree(mqrq_cur->packed);
        mqrq_cur->packed = NULL;
        kfree(mqrq_prev->packed);
        mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
                mq->flags |= MMC_QUEUE_SUSPENDED;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (mq->flags & MMC_QUEUE_SUSPENDED) {
                mq->flags &= ~MMC_QUEUE_SUSPENDED;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

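/*
 * Map a packed command onto one scatterlist. For packed writes the
 * packed command header is mapped first, followed by the data of each
 * queued request.
 */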
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
                                            struct mmc_packed *packed,
                                            struct scatterlist *sg,
                                            enum mmc_packed_type cmd_type)
{
        struct scatterlist *__sg = sg;
        unsigned int sg_len = 0;
        struct request *req;

        if (mmc_packed_wr(cmd_type)) {
                unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
                unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
                unsigned int len, remain, offset = 0;
                u8 *buf = (u8 *)packed->cmd_hdr;

                remain = hdr_sz;
                do {
                        len = min(remain, max_seg_sz);
                        sg_set_buf(__sg, buf + offset, len);
                        offset += len;
                        remain -= len;
                        sg_unmark_end(__sg++);
                        sg_len++;
                } while (remain);
        }

        list_for_each_entry(req, &packed->list, queuelist) {
                sg_len += blk_rq_map_sg(mq->queue, req, __sg);
                __sg = sg + (sg_len - 1);
                sg_unmark_end(__sg++);
        }
        sg_mark_end(sg + (sg_len - 1));
        return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
        enum mmc_packed_type cmd_type;
        int i;

        cmd_type = mqrq->cmd_type;

        if (!mqrq->bounce_buf) {
                if (mmc_packed_cmd(cmd_type))
                        return mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                       mqrq->sg, cmd_type);
                else
                        return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
        }

        BUG_ON(!mqrq->bounce_sg);

        if (mmc_packed_cmd(cmd_type))
                sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                 mqrq->bounce_sg, cmd_type);
        else
                sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

        mqrq->bounce_sg_len = sg_len;

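        /*
         * A bounced request collapses to a single segment: sum the length
         * of the mapped segments and map the whole bounce buffer as one
         * scatterlist entry.
         */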
        buflen = 0;
        for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
                buflen += sg->length;

        sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

        return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != WRITE)
                return;

        sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != READ)
                return;

        sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}