/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

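/*
 * Default size of the bounce buffer used when the host controller can
 * only handle a single segment (see the CONFIG_MMC_BLOCK_BOUNCE path
 * in mmc_init_queue() below); it is capped by the host's limits.
 */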
#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

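/*
 * Worker thread that fetches requests from the block layer queue and
 * feeds them to the issue function. It sleeps whenever both the
 * current and the previous request slots are empty, and exits when
 * kthread_stop() is called and no requests are left.
 */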
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		struct mmc_queue_req *tmp;
		unsigned int cmd_flags = 0;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * The current request becomes the previous one and
			 * vice versa. Special requests, however, have
			 * already been finished by the issue function, so
			 * they must not be handed over as a previous
			 * request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			tmp = mq->mqrq_prev;
			mq->mqrq_prev = mq->mqrq_cur;
			mq->mqrq_cur = tmp;
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler, called by the block layer whenever
 * requests are added to the queue. If the queue has already been torn
 * down it fails all pending requests; otherwise it either notifies a
 * thread waiting on the previous request that a new one has arrived,
 * or wakes up the worker thread.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * A new request arrived while the worker thread may be
		 * blocked waiting for the previous request to complete,
		 * with no current request fetched.
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

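/*
 * Allocate and initialise a scatterlist of sg_len entries. On failure
 * *err is set to -ENOMEM and NULL is returned.
 */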
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg)
		*err = -ENOMEM;
	else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

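/*
 * Configure the block layer discard support according to the card's
 * erase capabilities.
 */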
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.max_discard_sectors = max_discard;
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

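	/*
	 * If the host can only handle a single segment, set up bounce
	 * buffers: each request is copied into one contiguous buffer
	 * around the transfer (see mmc_queue_bounce_pre/post()) so
	 * that larger, scattered requests can still be issued.
	 */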
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			}
			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_prev->bounce_buf) {
				pr_warn("%s: unable to allocate bounce prev buffer\n",
					mmc_card_name(card));
				kfree(mqrq_cur->bounce_buf);
				mqrq_cur->bounce_buf = NULL;
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

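/*
 * mmc_cleanup_queue - tear down a queue created by mmc_init_queue():
 * resume the queue if it was suspended, stop the worker thread, fail
 * any requests still queued, and free the scatterlists and bounce
 * buffers.
 */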
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

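/*
 * Allocate the structures used for packed commands, which combine
 * several write requests into a single transfer. Both request slots
 * get one so that packing keeps working across the current/previous
 * swap in mmc_queue_thread().
 */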
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;

	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}

void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

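/*
 * Map a list of packed requests onto a scatterlist. For packed writes
 * the packed command header is mapped first, followed by the data of
 * each request on the list.
 */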
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			(__sg++)->page_link &= ~0x02;
			sg_len++;
		} while (remain);
	}

	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		(__sg++)->page_link &= ~0x02;
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

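	/*
	 * The host driver only sees a single segment covering the
	 * bounce buffer; the real pages are copied in and out around
	 * the transfer by mmc_queue_bounce_pre/post().
	 */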
	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}