/*
 * linux/drivers/mmc/host/tmio_mmc_pio.c
 *
 * Copyright (C) 2016 Sang Engineering, Wolfram Sang
 * Copyright (C) 2015-16 Renesas Electronics Corporation
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet.)
 *
 * TODO:
 * Investigate using a workqueue for PIO transfers
 * Eliminate FIXMEs
 * SDIO support
 * Better power management
 * Handle MMC errors better
 * Double buffer support
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"

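/*
 * Enable/disable helpers: update the cached sdcard_irq_mask and write it
 * back to the hardware CTL_IRQ_MASK register.
 */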
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32(host, CTL_STATUS, ~i);
}

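/*
 * Scatterlist walk state: remember the list for the current data transfer
 * and step through it one element at a time.
 */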
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

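/* Timeout (in ms) after which a pending request triggers the reset worker */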
#define CMDREQ_TIMEOUT	5000

#ifdef CONFIG_MMC_DEBUG

#define STATUS_TO_TEXT(a, status, i) \
	do { \
		if (status & TMIO_STAT_##a) { \
			if (i++) \
				printk(" | "); \
			printk(#a); \
		} \
	} while (0)

static void pr_debug_status(u32 status)
{
	int i = 0;
	pr_debug("status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE, status, i);
	STATUS_TO_TEXT(CARD_INSERT, status, i);
	STATUS_TO_TEXT(SIGSTATE, status, i);
	STATUS_TO_TEXT(WRPROTECT, status, i);
	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
	STATUS_TO_TEXT(SIGSTATE_A, status, i);
	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
	STATUS_TO_TEXT(ILL_FUNC, status, i);
	STATUS_TO_TEXT(CMD_BUSY, status, i);
	STATUS_TO_TEXT(CMDRESPEND, status, i);
	STATUS_TO_TEXT(DATAEND, status, i);
	STATUS_TO_TEXT(CRCFAIL, status, i);
	STATUS_TO_TEXT(DATATIMEOUT, status, i);
	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
	STATUS_TO_TEXT(RXOVERFLOW, status, i);
	STATUS_TO_TEXT(TXUNDERRUN, status, i);
	STATUS_TO_TEXT(RXRDY, status, i);
	STATUS_TO_TEXT(TXRQ, status, i);
	STATUS_TO_TEXT(ILL_ACCESS, status, i);
	printk("\n");
}

#else
#define pr_debug_status(s) do { } while (0)
#endif

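/*
 * Enabling SDIO IRQs takes a runtime PM reference so the controller stays
 * powered while the card can raise interrupts; the reference is dropped
 * again when the IRQ source is masked.
 */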
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));
		host->sdio_irq_enabled = true;

		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL &
					~TMIO_SDIO_STAT_IOIRQ;
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}

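/*
 * Pick the highest divider-generated clock that does not exceed the
 * requested rate: start at f_min (the largest divider) and halve the
 * divider while the next doubling still fits. Controllers with
 * TMIO_MMC_CLK_ACTUAL can additionally select the undivided (1/1) clock.
 */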
static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
				unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock) {
		for (clock = host->mmc->f_min, clk = 0x80000080;
		     new_clock >= (clock << 1);
		     clk >>= 1)
			clock <<= 1;

		/* 1/1 clock is an option */
		if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
		    ((clk >> 22) & 0x1))
			clk |= 0xff;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
	if (!(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG))
		msleep(10);
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		msleep(10);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 5 : 10);
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 1 : 10);

	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		msleep(10);
	}
}

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	msleep(10);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	msleep(10);
}

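/*
 * Watchdog for stuck requests: scheduled when a command is started and
 * cancelled on completion. If it fires, the current request is failed
 * with -ETIMEDOUT and the controller is reset.
 */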
static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq)
	 */
	if (IS_ERR_OR_NULL(mrq)
	    || time_is_after_jiffies(host->last_req_ts +
		msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		"timeout waiting for hardware interrupt (CMD%u)\n",
		mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

/* called with host->lock held, interrupts disabled */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->cmd = NULL;
	host->data = NULL;
	host->force_pio = false;

	cancel_delayed_work(&host->delayed_reset_work);

	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD          0x0040
#define RESP_NONE        0x0300
#define RESP_R1          0x0400
#define RESP_R1B         0x0500
#define RESP_R2          0x0600
#define RESP_R3          0x0700
#define DATA_PRESENT     0x0800
#define TRANSFER_READ    0x1000
#define TRANSFER_MULTI   0x2000
#define SECURITY_CMD     0x4000
#define NO_CMD12_ISSUE   0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */

static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;
	u32 irq_mask = TMIO_MASK_CMD;

	/* CMD12 is handled by hardware */
	if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
		return 0;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

	/* FIXME - this seems to be ok commented out but the spec suggests this bit
	 * should be set when issuing app commands.
	 *	if(cmd->flags & MMC_FLAG_ACMD)
	 *		c |= APP_CMD;
	 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED when
			 * multiple block transfer
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	if (!host->native_hotplug)
		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
	tmio_mmc_enable_mmc_irqs(host, irq_mask);

	/* Fire off the command */
	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

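/*
 * Move one chunk of PIO data through the 16-bit data port; a trailing odd
 * byte is transferred with a separate 16-bit access.
 */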
static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8  *buf8;

	/*
	 * Transfer the data
	 */
	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was an even number */
	if (!(count & 0x1))
		return;

	/* if count was an odd number */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME
	 *
	 * the driver and this function assume that
	 * the hardware is used in little-endian mode
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}

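/*
 * If the RX DMA path used the bounce buffer, copy the received block back
 * into the caller's original scatterlist.
 */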
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 * which we don't do, as the chip can auto generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop) {
		if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
		else
			BUG();
	}

	schedule_work(&host->done);
}

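/*
 * DATAEND handling: when DMA is active, wait (if necessary) for the write
 * FIFO to drain and then kick the DMA completion tasklet; in PIO mode the
 * request is completed directly.
 */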
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data;
	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set. In these cases
		 * waiting for one more interrupt fixes the problem.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_ILL_FUNC)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tasklet_schedule(&host->dma_complete);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tasklet_schedule(&host->dma_complete);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			else
				tasklet_schedule(&host->dma_issue);
		} else {
			if (host->force_pio || !host->chan_tx)
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			else
				tasklet_schedule(&host->dma_issue);
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

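/*
 * Snapshot and decode the interrupt status: report the pending, unmasked
 * sources in *ireg and clear all status bits except the interrupt bits,
 * which are acknowledged individually by their handlers.
 */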
static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
	int *ireg, int *status)
{
	*status = sd_ctrl_read32(host, CTL_STATUS);
	*ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	pr_debug_status(*status);
	pr_debug_status(*ireg);

	/* Clear the status except the interrupt status */
	sd_ctrl_write32(host, CTL_STATUS, TMIO_MASK_IRQ);
}

static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
	int ireg, int status)
{
	struct mmc_host *mmc = host->mmc;

	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
		return true;
	}

	return false;
}

irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_card_detect_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_card_detect_irq);

static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
	int ireg, int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host,
			TMIO_STAT_CMDRESPEND |
			TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host);
		return true;
	}

	return false;
}

irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
{
	unsigned int ireg, status;
	struct tmio_mmc_host *host = devid;

	tmio_mmc_card_irq_status(host, &ireg, &status);
	__tmio_mmc_sdcard_irq(host, ireg, status);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdcard_irq);

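/*
 * SDIO interrupt: acknowledge the SDIO status (applying the write quirk
 * where the platform requires it) and signal the MMC core when a card
 * interrupt (IOIRQ) is pending.
 */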
irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return IRQ_HANDLED;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_QUIRK)
		sdio_status |= 6;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_sdio_irq);

irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	pr_debug("MMC IRQ begin\n");

	tmio_mmc_card_irq_status(host, &ireg, &status);
	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	tmio_mmc_sdio_irq(irq, devid);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
	struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	pm_runtime_get_sync(mmc_dev(mmc));

	if (mrq->data) {
		ret = tmio_mmc_start_data(host, mrq->data);
		if (ret)
			goto fail;
	}

	ret = tmio_mmc_start_command(host, mrq->cmd);
	if (!ret) {
		schedule_delayed_work(&host->delayed_reset_work,
				      msecs_to_jiffies(CMDREQ_TIMEOUT));
		return;
	}

fail:
	host->force_pio = false;
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int tmio_mmc_clk_update(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	if (!host->clk_enable)
		return -ENOTSUPP;

	ret = host->clk_enable(host->pdev, &mmc->f_max);
	if (!ret)
		mmc->f_min = mmc->f_max / 512;

	return ret;
}

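/*
 * Power sequencing: Vcc is switched on first (via a platform hook and/or
 * the vmmc regulator), VccQ second; the settle delays below are empirical.
 */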
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() is returning void, so, no chance to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empirical value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay, as in
		 * tmio_mmc_set_ios()?
		 */
		udelay(200);
	}
	/*
	 * It seems VccQ should be switched on after Vcc, this is also what the
	 * omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		udelay(200);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}

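/* Select a 1- or 4-bit data bus in the SD_MEM_CARD_OPT register */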
static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				unsigned char bus_width)
{
	switch (bus_width) {
	case MMC_BUS_WIDTH_1:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
		break;
	case MMC_BUS_WIDTH_4:
		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
		break;
	}
}

/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC won't run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct device *dev = &host->pdev->dev;
	unsigned long flags;

	pm_runtime_get_sync(mmc_dev(mmc));

	mutex_lock(&host->ios_lock);

	spin_lock_irqsave(&host->lock, flags);
	if (host->mrq) {
		if (IS_ERR(host->mrq)) {
			dev_dbg(dev,
				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n",
				current->comm, task_pid_nr(current),
				ios->clock, ios->power_mode);
			host->mrq = ERR_PTR(-EINTR);
		} else {
			dev_dbg(dev,
				"%s.%d: CMD%u active since %lu, now %lu!\n",
				current->comm, task_pid_nr(current),
				host->mrq->cmd->opcode, host->last_req_ts, jiffies);
		}
		spin_unlock_irqrestore(&host->lock, flags);

		mutex_unlock(&host->ios_lock);
		return;
	}

	host->mrq = ERR_PTR(-EBUSY);

	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		tmio_mmc_power_off(host);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_UP:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_power_on(host, ios->vdd);
		tmio_mmc_clk_start(host);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	case MMC_POWER_ON:
		tmio_mmc_set_clock(host, ios->clock);
		tmio_mmc_clk_start(host);
		tmio_mmc_set_bus_width(host, ios->bus_width);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
	if (PTR_ERR(host->mrq) == -EINTR)
		dev_dbg(&host->pdev->dev,
			"%s.%d: IOS interrupted: clk %u, mode %u",
			current->comm, task_pid_nr(current),
			ios->clock, ios->power_mode);
	host->mrq = NULL;

	host->clk_cache = ios->clock;

	mutex_unlock(&host->ios_lock);

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct tmio_mmc_data *pdata = host->pdata;
	int ret = mmc_gpio_get_ro(mmc);
	if (ret >= 0)
		return ret;

	pm_runtime_get_sync(mmc_dev(mmc));
	ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));

	return ret;
}

static int tmio_multi_io_quirk(struct mmc_card *card,
			       unsigned int direction, int blk_size)
{
	struct tmio_mmc_host *host = mmc_priv(card->host);

	if (host->multi_io_quirk)
		return host->multi_io_quirk(card, direction, blk_size);

	return blk_size;
}

static const struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro		= tmio_mmc_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
	.multi_io_quirk	= tmio_multi_io_quirk,
};

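/*
 * Determine the supported OCR mask: prefer what the regulator reports,
 * fall back to platform data, and defer probing when neither is available
 * yet.
 */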
static int tmio_mmc_init_ocr(struct tmio_mmc_host *host)
{
	struct tmio_mmc_data *pdata = host->pdata;
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_get_supply(mmc);

	/* use ocr_mask if no regulator */
	if (!mmc->ocr_avail)
		mmc->ocr_avail = pdata->ocr_mask;

	/*
	 * Try again: there is a possibility that the regulator
	 * has not been probed yet.
	 */
	if (!mmc->ocr_avail)
		return -EPROBE_DEFER;

	return 0;
}

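/* Parse TMIO-specific devicetree properties into the platform data */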
Guennadi Liakhovetski | 5a00a97 | 2013-02-15 16:13:56 +0100 | [diff] [blame] | 1049 | static void tmio_mmc_of_parse(struct platform_device *pdev, |
| 1050 | struct tmio_mmc_data *pdata) |
| 1051 | { |
| 1052 | const struct device_node *np = pdev->dev.of_node; |
| 1053 | if (!np) |
| 1054 | return; |
| 1055 | |
| 1056 | if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL)) |
| 1057 | pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE; |
| 1058 | } |
| 1059 | |
Kuninori Morimoto | 94b110a | 2015-01-13 04:57:22 +0000 | [diff] [blame] | 1060 | struct tmio_mmc_host* |
| 1061 | tmio_mmc_host_alloc(struct platform_device *pdev) |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1062 | { |
Kuninori Morimoto | 94b110a | 2015-01-13 04:57:22 +0000 | [diff] [blame] | 1063 | struct tmio_mmc_host *host; |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1064 | struct mmc_host *mmc; |
Kuninori Morimoto | 94b110a | 2015-01-13 04:57:22 +0000 | [diff] [blame] | 1065 | |
| 1066 | mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev); |
| 1067 | if (!mmc) |
| 1068 | return NULL; |
| 1069 | |
| 1070 | host = mmc_priv(mmc); |
| 1071 | host->mmc = mmc; |
| 1072 | host->pdev = pdev; |
| 1073 | |
| 1074 | return host; |
| 1075 | } |
| 1076 | EXPORT_SYMBOL(tmio_mmc_host_alloc); |
| 1077 | |
| 1078 | void tmio_mmc_host_free(struct tmio_mmc_host *host) |
| 1079 | { |
| 1080 | mmc_free_host(host->mmc); |
Kuninori Morimoto | 94b110a | 2015-01-13 04:57:22 +0000 | [diff] [blame] | 1081 | } |
| 1082 | EXPORT_SYMBOL(tmio_mmc_host_free); |
| 1083 | |
| 1084 | int tmio_mmc_host_probe(struct tmio_mmc_host *_host, |
| 1085 | struct tmio_mmc_data *pdata) |
| 1086 | { |
| 1087 | struct platform_device *pdev = _host->pdev; |
| 1088 | struct mmc_host *mmc = _host->mmc; |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1089 | struct resource *res_ctl; |
| 1090 | int ret; |
| 1091 | u32 irq_mask = TMIO_MASK_CMD; |
| 1092 | |
Guennadi Liakhovetski | 5a00a97 | 2013-02-15 16:13:56 +0100 | [diff] [blame] | 1093 | tmio_mmc_of_parse(pdev, pdata); |
| 1094 | |
Guennadi Liakhovetski | 7b95213 | 2013-02-15 16:13:50 +0100 | [diff] [blame] | 1095 | if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT)) |
Kuninori Morimoto | dfe9a22 | 2015-01-13 04:57:42 +0000 | [diff] [blame] | 1096 | _host->write16_hook = NULL; |
Guennadi Liakhovetski | 7b95213 | 2013-02-15 16:13:50 +0100 | [diff] [blame] | 1097 | |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1098 | res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1099 | if (!res_ctl) |
| 1100 | return -EINVAL; |
| 1101 | |
Simon Baatz | 274a752 | 2013-06-09 22:14:13 +0200 | [diff] [blame] | 1102 | ret = mmc_of_parse(mmc); |
| 1103 | if (ret < 0) |
| 1104 | goto host_free; |
Guennadi Liakhovetski | 5a00a97 | 2013-02-15 16:13:56 +0100 | [diff] [blame] | 1105 | |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1106 | _host->pdata = pdata; |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1107 | platform_set_drvdata(pdev, mmc); |
| 1108 | |
Chris Ball | 9d731e7 | 2013-09-06 07:29:05 -0400 | [diff] [blame] | 1109 | _host->set_pwr = pdata->set_pwr; |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1110 | _host->set_clk_div = pdata->set_clk_div; |
| 1111 | |
Kuninori Morimoto | 05fae4a | 2013-11-20 00:30:39 -0800 | [diff] [blame] | 1112 | ret = tmio_mmc_init_ocr(_host); |
| 1113 | if (ret < 0) |
| 1114 | goto host_free; |
| 1115 | |
Ian Molton | 7df56bb | 2015-04-27 00:01:17 +0100 | [diff] [blame] | 1116 | _host->ctl = devm_ioremap(&pdev->dev, |
| 1117 | res_ctl->start, resource_size(res_ctl)); |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1118 | if (!_host->ctl) { |
| 1119 | ret = -ENOMEM; |
| 1120 | goto host_free; |
| 1121 | } |
| 1122 | |
| 1123 | mmc->ops = &tmio_mmc_ops; |
Guennadi Liakhovetski | 5a00a97 | 2013-02-15 16:13:56 +0100 | [diff] [blame] | 1124 | mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; |
Kuninori Morimoto | dd006b3 | 2013-11-20 00:16:14 -0800 | [diff] [blame] | 1125 | mmc->caps2 |= pdata->capabilities2; |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1126 | mmc->max_segs = 32; |
| 1127 | mmc->max_blk_size = 512; |
| 1128 | mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
| 1129 | mmc->max_segs; |
| 1130 | mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; |
| 1131 | mmc->max_seg_size = mmc->max_req_size; |
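| | /*
| | * Example: with 4 KiB pages this yields (4096 / 512) * 32 = 256
| | * blocks per request, i.e. a 128 KiB maximum request size.
| | */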
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1132 | |
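| | /*
| | * Rely on the controller's own card-detect interrupt only when no
| | * other detection method (CD GPIO, polling, a non-removable card or
| | * a slot CD IRQ) is in use.
| | */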
Guennadi Liakhovetski | c8be24c | 2012-02-09 22:57:09 +0100 | [diff] [blame] | 1133 | _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || |
Guennadi Liakhovetski | 2b1ac5c | 2012-02-09 22:57:08 +0100 | [diff] [blame] | 1134 | mmc->caps & MMC_CAP_NEEDS_POLL || |
Guennadi Liakhovetski | 5a00a97 | 2013-02-15 16:13:56 +0100 | [diff] [blame] | 1135 | mmc->caps & MMC_CAP_NONREMOVABLE || |
| 1136 | mmc->slot.cd_irq >= 0); |
Guennadi Liakhovetski | 2b1ac5c | 2012-02-09 22:57:08 +0100 | [diff] [blame] | 1137 | |
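| | /*
| | * If no rate can be obtained from the clock framework, fall back to
| | * the fixed rate supplied in the platform data.
| | */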
Ulf Hansson | ae12d25 | 2013-10-30 00:16:17 +0100 | [diff] [blame] | 1138 | if (tmio_mmc_clk_update(_host) < 0) { |
Guennadi Liakhovetski | 8c102a9 | 2012-06-20 19:10:31 +0200 | [diff] [blame] | 1139 | mmc->f_max = pdata->hclk; |
| 1140 | mmc->f_min = mmc->f_max / 512; |
| 1141 | } |
| 1142 | |
Bastian Hecht | cbb18b3 | 2011-12-23 23:03:13 +0100 | [diff] [blame] | 1143 | /* |
Sergei Shtylyov | bb98d9d | 2014-09-18 23:33:49 +0400 | [diff] [blame] | 1144 | * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from |
| 1145 | * looping forever... |
| 1146 | */ |
| 1147 | if (mmc->f_min == 0) { |
| 1148 | ret = -EINVAL; |
| 1149 | goto host_free; |
| 1150 | } |
| 1151 | |
| 1152 | /* |
Ulf Hansson | 0369483 | 2013-10-24 16:42:33 +0200 | [diff] [blame] | 1153 | * When the internal tmio card-detect logic is in use, the controller
| 1154 | * must be kept powered for detection to keep working.
Bastian Hecht | cbb18b3 | 2011-12-23 23:03:13 +0100 | [diff] [blame] | 1155 | */ |
Guennadi Liakhovetski | 2b1ac5c | 2012-02-09 22:57:08 +0100 | [diff] [blame] | 1156 | if (_host->native_hotplug) |
Bastian Hecht | cbb18b3 | 2011-12-23 23:03:13 +0100 | [diff] [blame] | 1157 | pm_runtime_get_noresume(&pdev->dev); |
| 1158 | |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1159 | tmio_mmc_clk_stop(_host); |
| 1160 | tmio_mmc_reset(_host); |
| 1161 | |
Simon Horman | 54680fe | 2011-08-25 10:27:25 +0900 | [diff] [blame] | 1162 | _host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK); |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1163 | tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); |
Guennadi Liakhovetski | e0337cc | 2012-06-20 19:10:30 +0200 | [diff] [blame] | 1164 | |
| 1165 | /* Unmask the IRQs we want to know about */ |
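| | /*
| | * The DMA channels are only requested further down in probe, so
| | * chan_rx/chan_tx are still NULL here and the PIO read/write
| | * completion interrupts are left unmasked by default.
| | */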
| 1166 | if (!_host->chan_rx) |
| 1167 | irq_mask |= TMIO_MASK_READOP; |
| 1168 | if (!_host->chan_tx) |
| 1169 | irq_mask |= TMIO_MASK_WRITEOP; |
| 1170 | if (!_host->native_hotplug) |
| 1171 | irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); |
| 1172 | |
| 1173 | _host->sdcard_irq_mask &= ~irq_mask; |
| 1174 | |
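| | /*
| | * Start with all SDIO interrupts masked; the MMC core enables them
| | * on demand through the host's enable_sdio_irq() operation.
| | */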
Ulf Hansson | 7501c43 | 2013-10-24 15:58:45 +0200 | [diff] [blame] | 1175 | _host->sdio_irq_enabled = false; |
| 1176 | if (pdata->flags & TMIO_MMC_SDIO_IRQ) { |
| 1177 | _host->sdio_irq_mask = TMIO_SDIO_MASK_ALL; |
| 1178 | sd_ctrl_write16(_host, CTL_SDIO_IRQ_MASK, _host->sdio_irq_mask); |
| 1179 | sd_ctrl_write16(_host, CTL_TRANSACTION_CTL, 0x0000); |
| 1180 | } |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1181 | |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1182 | spin_lock_init(&_host->lock); |
Guennadi Liakhovetski | b9269fd | 2011-07-14 12:12:38 +0200 | [diff] [blame] | 1183 | mutex_init(&_host->ios_lock); |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1184 | |
| 1185 | /* Init delayed work for request timeouts */ |
| 1186 | INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); |
Guennadi Liakhovetski | b9269fd | 2011-07-14 12:12:38 +0200 | [diff] [blame] | 1187 | INIT_WORK(&_host->done, tmio_mmc_done_work); |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1188 | |
| 1189 | /* See if we also get DMA */ |
| 1190 | tmio_mmc_request_dma(_host, pdata); |
| 1191 | |
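| | /*
| | * Mark the device active for runtime PM and let it autosuspend
| | * 50 ms after the last request completes.
| | */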
Ulf Hansson | 0369483 | 2013-10-24 16:42:33 +0200 | [diff] [blame] | 1192 | pm_runtime_set_active(&pdev->dev); |
| 1193 | pm_runtime_set_autosuspend_delay(&pdev->dev, 50); |
| 1194 | pm_runtime_use_autosuspend(&pdev->dev); |
| 1195 | pm_runtime_enable(&pdev->dev); |
| 1196 | |
Guennadi Liakhovetski | 8c102a9 | 2012-06-20 19:10:31 +0200 | [diff] [blame] | 1197 | ret = mmc_add_host(mmc); |
Guennadi Liakhovetski | 8c102a9 | 2012-06-20 19:10:31 +0200 | [diff] [blame] | 1198 | if (ret < 0) { |
| 1199 | tmio_mmc_host_remove(_host); |
| 1200 | return ret; |
| 1201 | } |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1202 | |
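| | /*
| | * Expose a user-space tunable PM QoS resume-latency limit (in
| | * microseconds) for this device in sysfs.
| | */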
Rafael J. Wysocki | c419e61 | 2012-03-13 01:01:51 +0100 | [diff] [blame] | 1203 | dev_pm_qos_expose_latency_limit(&pdev->dev, 100); |
| 1204 | |
Guennadi Liakhovetski | c8be24c | 2012-02-09 22:57:09 +0100 | [diff] [blame] | 1205 | if (pdata->flags & TMIO_MMC_USE_GPIO_CD) { |
Laurent Pinchart | 214fc30 | 2013-08-08 12:38:31 +0200 | [diff] [blame] | 1206 | ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0); |
Guennadi Liakhovetski | c8be24c | 2012-02-09 22:57:09 +0100 | [diff] [blame] | 1207 | if (ret < 0) { |
| 1208 | tmio_mmc_host_remove(_host); |
| 1209 | return ret; |
| 1210 | } |
Stephen Warren | d4d1144 | 2014-09-22 09:57:42 -0600 | [diff] [blame] | 1211 | mmc_gpiod_request_cd_irq(mmc); |
Guennadi Liakhovetski | c8be24c | 2012-02-09 22:57:09 +0100 | [diff] [blame] | 1212 | } |
| 1213 | |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1214 | return 0; |
| 1215 | |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1216 | host_free: |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1217 | /* The host itself is freed by the caller via tmio_mmc_host_free() */
| 1218 | return ret; |
| 1219 | } |
| 1220 | EXPORT_SYMBOL(tmio_mmc_host_probe); |
| 1221 | |
| 1222 | void tmio_mmc_host_remove(struct tmio_mmc_host *host) |
| 1223 | { |
Guennadi Liakhovetski | e6ee718 | 2011-05-05 16:13:12 +0000 | [diff] [blame] | 1224 | struct platform_device *pdev = host->pdev; |
Guennadi Liakhovetski | c8be24c | 2012-02-09 22:57:09 +0100 | [diff] [blame] | 1225 | struct mmc_host *mmc = host->mmc; |
Guennadi Liakhovetski | e6ee718 | 2011-05-05 16:13:12 +0000 | [diff] [blame] | 1226 | |
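| | /*
| | * Unless native hotplug has been keeping the controller powered,
| | * resume it so the teardown below can reach the hardware.
| | */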
Guennadi Liakhovetski | 2b1ac5c | 2012-02-09 22:57:08 +0100 | [diff] [blame] | 1227 | if (!host->native_hotplug) |
Guennadi Liakhovetski | 7311bef | 2011-05-11 16:51:11 +0000 | [diff] [blame] | 1228 | pm_runtime_get_sync(&pdev->dev); |
| 1229 | |
Rafael J. Wysocki | c419e61 | 2012-03-13 01:01:51 +0100 | [diff] [blame] | 1230 | dev_pm_qos_hide_latency_limit(&pdev->dev); |
| 1231 | |
Guennadi Liakhovetski | c8be24c | 2012-02-09 22:57:09 +0100 | [diff] [blame] | 1232 | mmc_remove_host(mmc); |
Guennadi Liakhovetski | b9269fd | 2011-07-14 12:12:38 +0200 | [diff] [blame] | 1233 | cancel_work_sync(&host->done); |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1234 | cancel_delayed_work_sync(&host->delayed_reset_work); |
| 1235 | tmio_mmc_release_dma(host); |
Guennadi Liakhovetski | e6ee718 | 2011-05-05 16:13:12 +0000 | [diff] [blame] | 1236 | |
Guennadi Liakhovetski | e6ee718 | 2011-05-05 16:13:12 +0000 | [diff] [blame] | 1237 | pm_runtime_put_sync(&pdev->dev); |
| 1238 | pm_runtime_disable(&pdev->dev); |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1239 | } |
| 1240 | EXPORT_SYMBOL(tmio_mmc_host_remove); |
| 1241 | |
Ulf Hansson | 9ade7db | 2014-08-25 12:03:20 +0200 | [diff] [blame] | 1242 | #ifdef CONFIG_PM |
Guennadi Liakhovetski | 7311bef | 2011-05-11 16:51:11 +0000 | [diff] [blame] | 1243 | int tmio_mmc_host_runtime_suspend(struct device *dev) |
| 1244 | { |
Ulf Hansson | ae12d25 | 2013-10-30 00:16:17 +0100 | [diff] [blame] | 1245 | struct mmc_host *mmc = dev_get_drvdata(dev); |
| 1246 | struct tmio_mmc_host *host = mmc_priv(mmc); |
| 1247 | |
Ulf Hansson | 20e955c | 2014-08-25 11:55:57 +0200 | [diff] [blame] | 1248 | tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); |
| 1249 | |
Ulf Hansson | ae12d25 | 2013-10-30 00:16:17 +0100 | [diff] [blame] | 1250 | if (host->clk_cache) |
| 1251 | tmio_mmc_clk_stop(host); |
| 1252 | |
Kuninori Morimoto | 00452c1 | 2015-01-13 04:58:01 +0000 | [diff] [blame] | 1253 | if (host->clk_disable) |
| 1254 | host->clk_disable(host->pdev); |
Ulf Hansson | ae12d25 | 2013-10-30 00:16:17 +0100 | [diff] [blame] | 1255 | |
Guennadi Liakhovetski | 7311bef | 2011-05-11 16:51:11 +0000 | [diff] [blame] | 1256 | return 0; |
| 1257 | } |
| 1258 | EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend); |
| 1259 | |
| 1260 | int tmio_mmc_host_runtime_resume(struct device *dev) |
| 1261 | { |
| 1262 | struct mmc_host *mmc = dev_get_drvdata(dev); |
| 1263 | struct tmio_mmc_host *host = mmc_priv(mmc); |
Guennadi Liakhovetski | 7311bef | 2011-05-11 16:51:11 +0000 | [diff] [blame] | 1264 | |
Ulf Hansson | ae12d25 | 2013-10-30 00:16:17 +0100 | [diff] [blame] | 1265 | tmio_mmc_reset(host); |
| 1266 | tmio_mmc_clk_update(host); |
| 1267 | |
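| | /* Restore the clock rate that was programmed before suspend */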
| 1268 | if (host->clk_cache) { |
| 1269 | tmio_mmc_set_clock(host, host->clk_cache); |
| 1270 | tmio_mmc_clk_start(host); |
| 1271 | } |
| 1272 | |
Guennadi Liakhovetski | 162f43e | 2011-07-14 18:39:10 +0200 | [diff] [blame] | 1273 | tmio_mmc_enable_dma(host, true); |
Guennadi Liakhovetski | 7311bef | 2011-05-11 16:51:11 +0000 | [diff] [blame] | 1274 | |
Guennadi Liakhovetski | 7311bef | 2011-05-11 16:51:11 +0000 | [diff] [blame] | 1275 | return 0; |
| 1276 | } |
| 1277 | EXPORT_SYMBOL(tmio_mmc_host_runtime_resume); |
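| | 
| | /*
| | * Sketch of how a glue driver might wire these callbacks into its
| | * dev_pm_ops (illustrative, not taken from this file):
| | *
| | *	static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
| | *		SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
| | *				   tmio_mmc_host_runtime_resume, NULL)
| | *	};
| | */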
Ulf Hansson | 710dec9 | 2013-10-23 14:55:07 +0200 | [diff] [blame] | 1278 | #endif |
Guennadi Liakhovetski | 7311bef | 2011-05-11 16:51:11 +0000 | [diff] [blame] | 1279 | |
Guennadi Liakhovetski | b614749 | 2011-03-23 12:42:44 +0100 | [diff] [blame] | 1280 | MODULE_LICENSE("GPL v2"); |