/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

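	/*
	 * thread_sem is held whenever the thread is doing work; it is
	 * dropped around schedule() below so that mmc_queue_suspend() can
	 * take the semaphore and thereby wait for any request in flight
	 * to complete.
	 */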
	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

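	/*
	 * The queue is being torn down: mmc_cleanup_queue() has already
	 * cleared q->queuedata, so fail any requests that are still
	 * coming in.
	 */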
	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

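	/*
	 * Only wake the thread when it is idle; if a request is already
	 * being processed, the thread loop fetches the next one itself
	 * once issue_fn() returns.
	 */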
	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

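	/*
	 * Default to bouncing highmem pages; if the host device declares a
	 * DMA mask, let the block layer hand us anything below that limit
	 * instead.
	 */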
	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
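	/*
	 * If the card can erase, advertise discard support.  card->erase_size
	 * is in 512-byte sectors, hence the shift left by 9 to express the
	 * discard granularity and alignment in bytes.
	 */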
	if (mmc_can_erase(card)) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
		mq->queue->limits.max_discard_sectors = UINT_MAX;
		if (card->erased_byte == 0)
			mq->queue->limits.discard_zeroes_data = 1;
		if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
			mq->queue->limits.discard_granularity =
							card->erase_size << 9;
			mq->queue->limits.discard_alignment =
							card->erase_size << 9;
		}
		if (mmc_can_secure_erase_trim(card))
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
						mq->queue);
	}

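	/*
	 * Hosts that can only handle a single segment get a contiguous
	 * bounce buffer, so larger requests do not have to be split into
	 * single-page segments; data is copied through the buffer around
	 * each request (see mmc_queue_bounce_pre/post below).
	 */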
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

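	/*
	 * No bounce buffer in use (or its allocation failed above), so size
	 * the queue according to the host controller's real limits.
	 */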
	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
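
/*
 * Typical usage (a sketch modelled on the MMC block driver in
 * drivers/mmc/card/block.c; "md", "md->lock" and "mmc_blk_issue_rq"
 * belong to that driver and are shown here only for illustration):
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock);
 *	if (ret)
 *		goto err;
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	md->queue.data = md;
 */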

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/*
	 * Empty the queue: with queuedata cleared, restarting the queue
	 * makes mmc_request() fail anything still queued with -EIO.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

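	/*
	 * Present the request to the host as a single scatterlist entry
	 * covering the bounce buffer.
	 */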
	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

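	/*
	 * sg_copy_to_buffer() walks the pages with atomic kmaps; interrupts
	 * are disabled around the copy so the per-CPU kmap slots cannot be
	 * re-entered from interrupt context.
	 */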
	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}