/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare a MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only like normal block requests.
	 */
	if (!blk_fs_request(req) && !blk_pc_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

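/*
 * Worker thread for a single MMC queue. It pulls requests off the
 * block layer queue and hands them to the issue function, sleeping
 * whenever the queue is empty. thread_sem is held while a request
 * is being processed, so mmc_queue_suspend() can take it to wait
 * for the thread to go idle.
 */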
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

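	/*
	 * A bounce buffer lets hosts that can only handle a single
	 * segment (max_hw_segs == 1) still see reasonably sized
	 * transfers: requests are mapped to a scatterlist as usual,
	 * but the data is staged through one contiguous buffer that
	 * is handed to the host.
	 */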
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kzalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	if (mq->bounce_sg)
		kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend a MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

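/*
 * Copy data between two scatterlists, one of which is typically the
 * single-entry list covering the bounce buffer. The lists may have
 * different segment sizes, so the data is copied chunk by chunk,
 * advancing through both lists until the source is exhausted.
 */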
static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = sg_virt(dst);
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = sg_virt(src);
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}

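/*
 * Map the current request onto a scatterlist. Without a bounce
 * buffer this is a plain blk_rq_map_sg(). With one, the request is
 * mapped onto bounce_sg and then collapsed into a single sg entry
 * covering the bounce buffer, whose length is the sum of the
 * bounce_sg entry lengths.
 */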
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

	sg_init_one(mq->sg, mq->bounce_buf, 0);

	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}

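/*
 * Copy the data for a write request into the bounce buffer before
 * the transfer starts. Reads, and requests that already map to a
 * single segment, need no copy here.
 */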
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != WRITE)
		return;

	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
}

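/*
 * Copy the data for a completed read request out of the bounce
 * buffer and back into the request's pages. The mirror image of
 * mmc_queue_bounce_pre().
 */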
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != READ)
		return;

	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
}