/*
 * linux/drivers/mmc/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

#define MMC_QUEUE_SUSPENDED	(1 << 0)

/*
 * Prepare an MMC request.  This just filters out requests we
 * can't handle.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	/*
	 * We only accept normal block requests.
	 */
	if (!blk_fs_request(req) && !blk_pc_request(req)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;

	/*
	 * Set PF_MEMALLOC and PF_NOFREEZE to ensure that we aren't
	 * put to sleep by the process freezer.  We handle suspension
	 * ourselves.
	 */
	current->flags |= PF_MEMALLOC|PF_NOFREEZE;

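	/*
	 * thread_sem serialises us against mmc_queue_suspend() and
	 * mmc_cleanup_queue(): we hold it while issuing requests and
	 * drop it around schedule(), so anyone who takes it is
	 * guaranteed that this thread is idle.
	 */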
	down(&mq->thread_sem);
	do {
		struct request *req = NULL;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!blk_queue_plugged(q))
			req = elv_next_request(q);
		mq->req = req;
		spin_unlock_irq(q->queue_lock);

		if (!req) {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
			continue;
		}
		set_current_state(TASK_RUNNING);

		mq->issue_fn(mq, req);
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(request_queue_t *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
			} while (ret);
		}
		return;
	}

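	/*
	 * Only wake the worker if it is idle; if mq->req is set, the
	 * thread is already busy and will pull further requests off
	 * the queue itself.
	 */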
	if (!mq->req)
		wake_up_process(mq->thread);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue to
 * @lock: queue lock
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	unsigned int bouncesz;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
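	/*
	 * Hosts that can only handle a single hardware segment would
	 * otherwise be limited to one segment per request.  Give them
	 * a contiguous bounce buffer instead, so that larger transfers
	 * can still be issued as a single operation.
	 */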
	if (host->max_hw_segs == 1) {
		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;

		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
		if (!mq->bounce_buf) {
			printk(KERN_WARNING "%s: unable to allocate "
				"bounce buffer\n", mmc_card_name(card));
		} else {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto free_bounce_buf;
			}

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto free_sg;
			}
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;
 free_sg:
	kfree(mq->sg);
	mq->sg = NULL;
 free_bounce_buf:
	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
 cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}
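
/*
 * Typical usage, modelled loosely on the MMC block driver.  This is
 * an illustrative sketch -- "md" and mmc_blk_issue_rq stand in for
 * the caller's own data and issue function:
 *
 *	spin_lock_init(&md->lock);
 *	ret = mmc_init_queue(&md->queue, card, &md->lock);
 *	if (ret)
 *		return ret;
 *	md->queue.issue_fn = mmc_blk_issue_rq;
 *	md->queue.data = md;
 */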

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	/* Mark that we should start throwing out stragglers */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	kfree(mq->bounce_sg);
	mq->bounce_sg = NULL;

	kfree(mq->sg);
	mq->sg = NULL;

	kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

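		/*
		 * Taking thread_sem blocks until mmc_queue_thread has
		 * released it, i.e. until it has finished issuing any
		 * request it was working on and gone idle.
		 */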
		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	request_queue_t *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

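/*
 * Copy data between two scatterlists that may have different entry
 * sizes.  Walks both lists, copying min(dst, src) sized chunks and
 * advancing whichever side has been exhausted.
 */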
static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
	struct scatterlist *src, unsigned int src_len)
{
	unsigned int chunk;
	char *dst_buf, *src_buf;
	unsigned int dst_size, src_size;

	dst_buf = NULL;
	src_buf = NULL;
	dst_size = 0;
	src_size = 0;

	while (src_len) {
		BUG_ON(dst_len == 0);

		if (dst_size == 0) {
			dst_buf = page_address(dst->page) + dst->offset;
			dst_size = dst->length;
		}

		if (src_size == 0) {
			src_buf = page_address(src->page) + src->offset;
			src_size = src->length;
		}

		chunk = min(dst_size, src_size);

		memcpy(dst_buf, src_buf, chunk);

		dst_buf += chunk;
		src_buf += chunk;
		dst_size -= chunk;
		src_size -= chunk;

		if (dst_size == 0) {
			dst++;
			dst_len--;
		}

		if (src_size == 0) {
			src++;
			src_len--;
		}
	}
}

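/*
 * Prepare the sg list(s) to be handed off to the host driver.
 * Without a bounce buffer this is a straight blk_rq_map_sg().  With
 * one, the request is mapped onto bounce_sg and then collapsed into
 * a single mq->sg entry covering the bounce buffer.
 */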
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

	mq->sg[0].page = virt_to_page(mq->bounce_buf);
	mq->sg[0].offset = offset_in_page(mq->bounce_buf);
	mq->sg[0].length = 0;

	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}

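/*
 * Copy the data for a write request into the bounce buffer before
 * the transfer is started.  Reads are handled after completion, in
 * mmc_queue_bounce_post().
 */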
void mmc_queue_bounce_pre(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != WRITE)
		return;

	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
}

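/*
 * Copy the data for a completed read request back out of the bounce
 * buffer into the request's real pages.
 */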
void mmc_queue_bounce_post(struct mmc_queue *mq)
{
	if (!mq->bounce_buf)
		return;

	if (mq->bounce_sg_len == 1)
		return;
	if (rq_data_dir(mq->req) != READ)
		return;

	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
}
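
/*
 * Expected call sequence from the block driver, as an illustrative
 * sketch ("brq" stands in for the caller's own mmc_blk_request-style
 * bookkeeping):
 *
 *	brq.data.sg = mq->sg;
 *	brq.data.sg_len = mmc_queue_map_sg(mq);
 *	mmc_queue_bounce_pre(mq);
 *	mmc_wait_for_req(card->host, &brq.mrq);
 *	mmc_queue_bounce_post(mq);
 */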