/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

#include "tmio_mmc.h"

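/*
 * The IRQ mask register uses 1 = masked: enabling clears the requested bits,
 * disabling sets them.  Only bits within TMIO_MASK_IRQ are ever touched.
 */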
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s) do { } while (0)
#endif

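/*
 * Turn SDIO IRQ reporting on or off: enabling sets the transaction control
 * bit and unmasks only the IOIRQ bit in the SDIO IRQ mask; disabling masks
 * all SDIO interrupts again.
 */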
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable) {
		host->sdio_irq_enabled = 1;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
	} else {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
		host->sdio_irq_enabled = 0;
	}
}

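/*
 * Pick the largest divided clock not exceeding new_clock: start at f_min
 * with the largest divider setting (0x80) and shift the divider right each
 * time the frequency can still be doubled.  Bit 8 (0x100) enables the clock
 * output, and bit 22 of the raw value is handed to the platform's
 * set_clk_div() hook.
 */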
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
			new_clock >= (clock<<1); clk >>= 1)
			clock <<= 1;
		clk |= 0x100;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk>>22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}

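/*
 * Stop and restart the card clock.  Variants with a register window larger
 * than 0x100 also have the CLK_AND_WAIT control register, which is toggled
 * around the clock-enable bit (0x100) in CTL_SD_CARD_CLK_CTL.
 */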
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(10);

	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);

	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	/* implicit BUG_ON(!res) */
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (resource_size(res) > 0x100)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}

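/*
 * Watchdog for requests that never complete: armed for 2 seconds when a
 * command is issued and cancelled on completion.  If it fires while a
 * request is still pending, fail it with -ETIMEDOUT and reset the controller.
 */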
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(2000))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	mmc_request_done(host->mmc, mrq);
}

/* Called via the host->done work; takes host->lock itself */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD          0x0040
#define RESP_NONE        0x0300
#define RESP_R1          0x0400
#define RESP_R1B         0x0500
#define RESP_R2          0x0600
#define RESP_R3          0x0700
#define DATA_PRESENT     0x0800
#define TRANSFER_READ    0x1000
#define TRANSFER_MULTI   0x2000
#define SECURITY_CMD     0x4000

static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok commented out but the spec suggests this bit
 * should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
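/*
 * PIO data path: each RXRDY / TXRQ interrupt moves at most one block
 * (data->blksz bytes) through the 16-bit data port and advances the
 * scatterlist position accordingly.
 */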
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	if (data->flags & MMC_DATA_READ)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}

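/*
 * If the transfer was redirected through the bounce buffer, copy the
 * received data back into the caller's original scatterlist entry.
 */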
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 * which we don't do, as the chip can auto-generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == 12 && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}

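/*
 * DATAEND handler: for DMA transfers hand completion off to the dma_complete
 * tasklet (for writes only once the BUSY bit has cleared); for PIO transfers
 * finish the data request directly.
 */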
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt already comes with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * at the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set; in these cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

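/*
 * Top-level interrupt handler.  Events are dispatched in priority order:
 * SDIO, card insert/remove, command completion/timeout, PIO data, data end.
 * Anything left over is reported as spurious and masked.
 */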
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, irq_mask, status;
	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;

	pr_debug("MMC IRQ begin\n");

	status = sd_ctrl_read32(host, CTL_STATUS);
	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	sdio_ireg = 0;
	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;

		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);

		if (sdio_ireg && !host->sdio_irq_enabled) {
			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
				   sdio_status, sdio_irq_mask, sdio_ireg);
			tmio_mmc_enable_sdio_irq(host->mmc, 0);
			goto out;
		}

		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
			mmc_signal_sdio_irq(host->mmc);

		if (sdio_ireg)
			goto out;
	}

	pr_debug_status(status);
	pr_debug_status(ireg);

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if (!work_pending(&host->mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		goto out;
	}

	/* CRC and other errors */
/*	if (ireg & TMIO_STAT_ERR_IRQ)
 *		handled |= tmio_error_irq(host, irq, stat);
 */

	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
			TMIO_STAT_CMDRESPEND |
			TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		goto out;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		goto out;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);
		goto out;
	}

	pr_warning("tmio_mmc: Spurious irq, disabling! "
		"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
	pr_debug_status(status);
	tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);

out:
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(2000));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme, therefore it cannot
 * run an MMC card at full speed (20 MHz). The max clock is 24 MHz on SD, but
 * as MMC won't run that fast, it has to be clocked at 12 MHz, which is the
 * next slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned long flags;

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(&host->pdev->dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(&host->pdev->dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> UP -> ON */
	if (ios->power_mode == MMC_POWER_UP) {
		if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && !pdata->power) {
			pm_runtime_get_sync(&host->pdev->dev);
			pdata->power = true;
		}
		/* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* power down SD bus */
		if (ios->power_mode == MMC_POWER_OFF) {
			if (host->set_pwr)
				host->set_pwr(host->pdev, 0);
			if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
			    pdata->power) {
				pdata->power = false;
				pm_runtime_put(&host->pdev->dev);
			}
		}
		tmio_mmc_clk_stop(host);
	} else {
		/* start bus clock */
		tmio_mmc_clk_start(host);
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}

	/* Let things settle. Delay taken from the WinCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	mutex_unlock(&host->ios_lock);
}

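/*
 * Report the write-protect state.  The driver treats a set TMIO_STAT_WRPROTECT
 * bit as "write enabled", so the slot is reported read-only only when WP
 * sensing is not disabled by the platform and that bit reads back clear.
 */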
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}

static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	if (!pdata->get_cd)
		return -ENOSYS;
	else
		return pdata->get_cd(host->pdev);
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= tmio_mmc_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};

int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
				  struct platform_device *pdev,
				  struct tmio_mmc_data *pdata)
{
	struct tmio_mmc_host *_host;
	struct mmc_host *mmc;
	struct resource *res_ctl;
	int ret;
	u32 irq_mask = TMIO_MASK_CMD;

	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_ctl)
		return -EINVAL;

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	pdata->dev = &pdev->dev;
	_host = mmc_priv(mmc);
	_host->pdata = pdata;
	_host->mmc = mmc;
	_host->pdev = pdev;
	platform_set_drvdata(pdev, mmc);

	_host->set_pwr = pdata->set_pwr;
	_host->set_clk_div = pdata->set_clk_div;

	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
	_host->bus_shift = resource_size(res_ctl) >> 10;

	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!_host->ctl) {
		ret = -ENOMEM;
		goto host_free;
	}

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;
	mmc->max_segs = 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
		mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;
	if (pdata->ocr_mask)
		mmc->ocr_avail = pdata->ocr_mask;
	else
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	pdata->power = false;
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume(&pdev->dev);
	if (ret < 0)
		goto pm_disable;

	tmio_mmc_clk_stop(_host);
	tmio_mmc_reset(_host);

	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
		tmio_mmc_enable_sdio_irq(mmc, 0);

	spin_lock_init(&_host->lock);
	mutex_init(&_host->ios_lock);

	/* Init delayed work for request timeouts */
	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
	INIT_WORK(&_host->done, tmio_mmc_done_work);

	/* See if we also get DMA */
	tmio_mmc_request_dma(_host, pdata);

	/* We have to keep the device powered for its card detection to work */
	if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD))
		pm_runtime_get_noresume(&pdev->dev);

	mmc_add_host(mmc);

	/* Unmask the IRQs we want to know about */
	if (!_host->chan_rx)
		irq_mask |= TMIO_MASK_READOP;
	if (!_host->chan_tx)
		irq_mask |= TMIO_MASK_WRITEOP;

	tmio_mmc_enable_mmc_irqs(_host, irq_mask);

	*host = _host;

	return 0;

pm_disable:
	pm_runtime_disable(&pdev->dev);
	iounmap(_host->ctl);
host_free:
	mmc_free_host(mmc);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);

void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
	struct platform_device *pdev = host->pdev;

	/*
	 * We don't have to manipulate pdata->power here: if there is a card in
	 * the slot, the runtime PM is active and our .runtime_resume() will not
	 * be run. If there is no card in the slot and the platform can suspend
	 * the controller, the runtime PM is suspended and pdata->power == false,
	 * so, our .runtime_resume() will not try to detect a card in the slot.
	 */
	if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD)
		pm_runtime_get_sync(&pdev->dev);

	mmc_remove_host(host->mmc);
	cancel_work_sync(&host->done);
	cancel_delayed_work_sync(&host->delayed_reset_work);
	tmio_mmc_release_dma(host);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	iounmap(host->ctl);
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);

#ifdef CONFIG_PM
int tmio_mmc_host_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int ret = mmc_suspend_host(mmc);

	if (!ret)
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);

	host->pm_error = pm_runtime_put_sync(dev);

	return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_suspend);

int tmio_mmc_host_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);

	/* The MMC core will perform the complete set up */
	host->pdata->power = false;

	if (!host->pm_error)
		pm_runtime_get_sync(dev);

	tmio_mmc_reset(mmc_priv(mmc));
	tmio_mmc_request_dma(host, host->pdata);

	return mmc_resume_host(mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_resume);

#endif	/* CONFIG_PM */

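/*
 * Runtime PM: nothing needs to be saved on suspend; the controller is reset
 * and re-configured in .runtime_resume() below.
 */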
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend);

int tmio_mmc_host_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;

	tmio_mmc_reset(host);

	if (pdata->power) {
		/* Only entered after a card-insert interrupt */
		tmio_mmc_set_ios(mmc, &mmc->ios);
		mmc_detect_change(mmc, msecs_to_jiffies(100));
	}

	return 0;
}
EXPORT_SYMBOL(tmio_mmc_host_runtime_resume);

MODULE_LICENSE("GPL v2");