/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

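	/*
	 * PF_MEMALLOC lets this thread dip into the memory reserves:
	 * writeback to the card must be able to make progress even
	 * when memory is tight, or reclaim could deadlock on us.
	 */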
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

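		/*
		 * Mark ourselves interruptible before looking for work,
		 * so that a wakeup from mmc_request() between the fetch
		 * below and schedule() is not lost.
		 */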
		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
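			/*
			 * Nothing to do: release thread_sem while we
			 * sleep so that mmc_queue_suspend() can take it,
			 * then reacquire it once we are woken again.
			 */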
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

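	/*
	 * The queue may already have been torn down: mmc_cleanup_queue()
	 * clears queuedata and restarts the queue precisely so that any
	 * straggling requests are failed here.
	 */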
	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	if (mmc_can_erase(card)) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
		mq->queue->limits.max_discard_sectors = UINT_MAX;
		if (card->erased_byte == 0)
			mq->queue->limits.discard_zeroes_data = 1;
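		/*
		 * card->erase_size is in 512-byte sectors; shifting left
		 * by 9 converts it to the byte units the block layer
		 * expects for discard granularity and alignment.
		 */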
		if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
			mq->queue->limits.discard_granularity =
				card->erase_size << 9;
			mq->queue->limits.discard_alignment =
				card->erase_size << 9;
		}
		if (mmc_can_secure_erase_trim(card))
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
						mq->queue);
	}

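	/*
	 * Hosts limited to a single segment would otherwise be stuck
	 * with tiny transfers; a contiguous bounce buffer lets them
	 * accept larger requests at the cost of an extra copy.
	 */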
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

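		/* Bouncing buys nothing once we are down to one sector. */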
		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

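			/*
			 * A single sg entry is all the host ever sees:
			 * it covers the contiguous bounce buffer.
			 */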
			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

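			/*
			 * bounce_sg tracks the request's real pages,
			 * worst case one segment per 512-byte sector
			 * of the bounce buffer.
			 */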
			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

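		/*
		 * No bouncing: the host is handed the request's pages
		 * directly, so size the sg table for the largest request
		 * the block layer may build.
		 */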
		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

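		/*
		 * The worker releases thread_sem only while it sleeps,
		 * so this blocks until any request it was issuing has
		 * been completed.
		 */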
		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

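	/*
	 * Map the request's real pages into bounce_sg; the host itself
	 * only sees mq->sg, a single entry covering the bounce buffer,
	 * set up below once the total length is known.
	 */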
	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

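	/*
	 * The sg copy helpers map highmem pages with kmap_atomic();
	 * keep interrupts off so a handler cannot race for the same
	 * kmap slot.
	 */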
	local_irq_save(flags);
	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	unsigned long flags;

	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	local_irq_save(flags);
	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
	local_irq_restore(flags);
}