/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

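/*
 * Default size of the bounce buffer used for hosts limited to a single
 * segment; capped below by the host's request, segment and block-count
 * limits.
 */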
#define MMC_QUEUE_BOUNCESZ 65536

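/* Bit in mq->flags set while the queue is suspended */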
#define MMC_QUEUE_SUSPENDED (1 << 0)

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

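/*
 * Per-queue worker thread. It pulls requests off the block queue and
 * hands them to the issue function, sleeping whenever the queue is
 * empty and it has not been asked to stop.
 */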
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct request *req;

#ifdef CONFIG_MMC_PERF_PROFILING
	ktime_t start, diff;
	struct mmc_host *host = mq->card->host;
	unsigned long bytes_xfer;
#endif
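	/*
	 * Allow this thread to dip into the memory reserves: it must be
	 * able to make progress when the system is low on memory, since
	 * writeback to the card may be needed to free pages.
	 */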
	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		req = NULL;	/* Must be set to NULL at each iteration */

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);
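		/*
		 * With CONFIG_MMC_PERF_PROFILING enabled, time each
		 * transfer and account the bytes and elapsed time to
		 * the host's read or write counters.
		 */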
#ifdef CONFIG_MMC_PERF_PROFILING
		bytes_xfer = blk_rq_bytes(req);
		if (rq_data_dir(req) == READ) {
			start = ktime_get();
			mq->issue_fn(mq, req);
			diff = ktime_sub(ktime_get(), start);
			host->perf.rbytes_mmcq += bytes_xfer;
			host->perf.rtime_mmcq =
				ktime_add(host->perf.rtime_mmcq, diff);
		} else {
			start = ktime_get();
			mq->issue_fn(mq, req);
			diff = ktime_sub(ktime_get(), start);
			host->perf.wbytes_mmcq += bytes_xfer;
			host->perf.wtime_mmcq =
				ktime_add(host->perf.wtime_mmcq, diff);
		}
#else
		mq->issue_fn(mq, req);
#endif
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;

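	/*
	 * The queue has no queuedata once mmc_cleanup_queue() has run,
	 * so fail any requests that still arrive after that point.
	 */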
	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
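	/*
	 * Advertise discard support when the card can erase, and secure
	 * discard when it also supports secure erase/trim.
	 */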
	if (mmc_can_erase(card)) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
		mq->queue->limits.max_discard_sectors = UINT_MAX;
		if (card->erased_byte == 0)
			mq->queue->limits.discard_zeroes_data = 1;
		mq->queue->limits.discard_granularity = card->pref_erase << 9;
		if (mmc_can_secure_erase_trim(card))
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
						mq->queue);
	}

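	/*
	 * Hosts that can only handle a single segment would otherwise be
	 * limited to one page per request. Give such hosts a contiguous
	 * bounce buffer instead, sized down to the host's limits, and
	 * copy data through it.
	 */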
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

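	/*
	 * Without a bounce buffer, derive the queue limits directly from
	 * the host controller's capabilities.
	 */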
	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_segs);
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 cleanup_queue:
	kfree(mq->sg);
	mq->sg = NULL;
	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}

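/**
 * mmc_cleanup_queue - shut down an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Resume the queue if needed, stop the worker thread, fail any
 * remaining requests and release the buffers.
 */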
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	int i;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

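	/*
	 * Present the whole transfer to the host as a single scatterlist
	 * entry covering the bounce buffer.
	 */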
	buflen = 0;
	for_each_sg(mq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mq->sg, mq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != WRITE)
		return;

	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (rq_data_dir(mq->req) != READ)
		return;

	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
		mq->bounce_buf, mq->sg[0].length);
}