blob: aadb0d6aa63f1de78897f7978c3a4a96922514d4 [file] [log] [blame]
/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
Will Newtonf95f3852011-01-02 01:11:59 -050025#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/stat.h>
28#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/mmc/host.h>
31#include <linux/mmc/mmc.h>
Seungwon Jeon90c21432013-08-31 00:14:05 +090032#include <linux/mmc/sdio.h>
Will Newtonf95f3852011-01-02 01:11:59 -050033#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
Jaehoon Chungc07946a2011-02-25 11:08:14 +090035#include <linux/regulator/consumer.h>
James Hogan1791b13e2011-06-24 13:55:55 +010036#include <linux/workqueue.h>
Thomas Abrahamc91eab42012-09-17 18:16:40 +000037#include <linux/of.h>
Doug Anderson55a6ceb2013-01-11 17:03:53 +000038#include <linux/of_gpio.h>
Zhangfei Gaobf626e52014-01-09 22:35:10 +080039#include <linux/mmc/slot-gpio.h>
Will Newtonf95f3852011-01-02 01:11:59 -050040
41#include "dw_mmc.h"
42
/* Common flag combinations */
/* Data-path error interrupts: data read timeout, data CRC, host timeout,
 * start-bit error, end-bit error */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE)
/* Command-path error interrupts: response timeout, response CRC,
 * response error */
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
/* All error interrupts, including hardware-locked-write error */
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* Values for host->dir_status: direction of the current data transfer */
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
/* Transfers smaller than this many bytes skip DMA and go through PIO
 * (see dw_mci_pre_dma_transfer) */
#define DW_MCI_DMA_THRESHOLD	16

/* Bus clock limits in Hz; presumably advertised to the MMC core as
 * f_min/f_max — set elsewhere in this file (not visible in this chunk) */
#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */
57
#ifdef CONFIG_MMC_DW_IDMAC
/* All internal-DMAC interrupt status bits, for clearing IDSTS in one write */
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

/*
 * Hardware descriptor for the internal DMA controller (IDMAC).
 * A ring of these is kept in host->sg_cpu (see dw_mci_idmac_init).
 */
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* disable interrupt on completion */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of the transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of the transfer */
#define IDMAC_DES0_CH	BIT(4)	/* des3 chains to the next descriptor */
#define IDMAC_DES0_ER	BIT(5)	/* end of descriptor ring */
#define IDMAC_DES0_CES	BIT(30)	/* card error summary (per databook) */
#define IDMAC_DES0_OWN	BIT(31)	/* descriptor is owned by the DMA engine */

	u32		des1;	/* Buffer sizes */
/* Buffer 1 size lives in des1 bits [12:0]; bits [25:13] (buffer 2) kept */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */
83
/*
 * Standard 64-byte tuning block pattern for 4-bit bus transfers
 * (consumed by the tuning support elsewhere in this driver — not
 * visible in this chunk).
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
Will Newtonf95f3852011-01-02 01:11:59 -050094
/*
 * Standard 128-byte tuning block pattern for 8-bit bus transfers
 * (consumed by the tuning support elsewhere in this driver — not
 * visible in this chunk).
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
113
Sonny Rao3a33a942014-08-04 18:19:50 -0700114static bool dw_mci_reset(struct dw_mci *host);
Seungwon Jeon31bff452013-08-31 00:14:23 +0900115
Will Newtonf95f3852011-01-02 01:11:59 -0500116#if defined(CONFIG_DEBUG_FS)
117static int dw_mci_req_show(struct seq_file *s, void *v)
118{
119 struct dw_mci_slot *slot = s->private;
120 struct mmc_request *mrq;
121 struct mmc_command *cmd;
122 struct mmc_command *stop;
123 struct mmc_data *data;
124
125 /* Make sure we get a consistent snapshot */
126 spin_lock_bh(&slot->host->lock);
127 mrq = slot->mrq;
128
129 if (mrq) {
130 cmd = mrq->cmd;
131 data = mrq->data;
132 stop = mrq->stop;
133
134 if (cmd)
135 seq_printf(s,
136 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
137 cmd->opcode, cmd->arg, cmd->flags,
138 cmd->resp[0], cmd->resp[1], cmd->resp[2],
139 cmd->resp[2], cmd->error);
140 if (data)
141 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
142 data->bytes_xfered, data->blocks,
143 data->blksz, data->flags, data->error);
144 if (stop)
145 seq_printf(s,
146 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
147 stop->opcode, stop->arg, stop->flags,
148 stop->resp[0], stop->resp[1], stop->resp[2],
149 stop->resp[2], stop->error);
150 }
151
152 spin_unlock_bh(&slot->host->lock);
153
154 return 0;
155}
156
/* debugfs open: bind dw_mci_req_show to the slot stashed in i_private */
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}
161
/* File operations for the per-slot "req" debugfs entry */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
169
170static int dw_mci_regs_show(struct seq_file *s, void *v)
171{
172 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
173 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
174 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
175 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
176 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
177 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
178
179 return 0;
180}
181
/* debugfs open: bind dw_mci_regs_show to the host stashed in i_private */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}
186
/* File operations for the "regs" debugfs entry */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
194
/*
 * Create the per-slot debugfs entries under the MMC core's debugfs
 * root: register dump ("regs"), current request ("req"), state machine
 * state, and the pending/completed event bitmasks.  Best effort: logs
 * an error and bails on the first failure.
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
235#endif /* defined(CONFIG_DEBUG_FS) */
236
/*
 * Build the SDMMC_CMD register value for @cmd: STOP bit for abort-class
 * commands, wait-for-previous-data for other data-bearing commands,
 * plus response/CRC/data/direction attributes from cmd->flags.  The
 * platform drv_data hook may post-process the value.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;
	/* mark in-flight; completion code replaces this with the result */
	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	/*
	 * Abort-class commands (CMD12, CMD0, CMD15, and an SDIO write to
	 * the CCCR abort register) get the STOP bit so the CIU can issue
	 * them while a data transfer is in progress.
	 */
	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	/* give the platform glue a chance to tweak the command word */
	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}
280
/*
 * Pre-build the CMD register value for the stop/abort command that can
 * terminate @cmd's data transfer: CMD12 for block read/write, an SDIO
 * CCCR_ABORT write for CMD53.  The mmc_command itself is stored in
 * host->stop_abort.  Returns 0 when @cmd needs no stop command.
 */
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		/*
		 * Direct write to CCCR_ABORT, carrying over bits [30:28]
		 * of the original CMD53 argument.
		 */
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}
314
/*
 * Latch @cmd as the in-flight command and kick the CIU: program CMDARG,
 * then write CMD with the start bit.  The wmb() orders the argument
 * write before the start-bit write reaches the hardware.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
328
/*
 * Issue the stop command for @data: the request's own stop command if
 * it has one, otherwise the precomputed host->stop_abort (see
 * dw_mci_prep_stop_abort), using the cached host->stop_cmdr value.
 */
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
	dw_mci_start_command(host, stop, host->stop_cmdr);
}
334
/* DMA interface functions */

/*
 * Abort an in-progress DMA transfer (if any) and mark the transfer
 * phase complete so the tasklet state machine can proceed.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
346
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900347static int dw_mci_get_dma_dir(struct mmc_data *data)
348{
349 if (data->flags & MMC_DATA_WRITE)
350 return DMA_TO_DEVICE;
351 else
352 return DMA_FROM_DEVICE;
353}
354
Jaehoon Chung9beee912012-02-16 11:19:38 +0900355#ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the current data transfer's scatterlist — unless the mapping
 * is owned by the pre_req/post_req pair (host_cookie set), in which
 * case dw_mci_post_req will unmap it.
 */
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}
367
/* Issue a software reset of the internal DMAC via the BMOD register. */
static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}
375
/*
 * Take the internal DMAC out of the data path: deselect it in CTRL and
 * reset the DMA interface, then halt the IDMAC (clear enable and
 * fixed-burst bits) and soft-reset it via BMOD.
 */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
392
/*
 * IDMAC transfer-complete handler: unmap buffers and, if the request
 * is still alive, flag transfer completion and run the tasklet.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
410
/*
 * Fill the descriptor ring in host->sg_cpu from the DMA-mapped
 * scatterlist: one chained (CH), interrupt-suppressed (DIC) descriptor
 * per sg entry with OWN set so the IDMAC may consume it, then mark the
 * first (FD) and last (LD, interrupts re-enabled) descriptors.  The
 * final wmb() publishes the descriptors before DMA is started.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	wmb();
}
442
/*
 * Start an IDMAC transfer: build descriptors for the @sg_len mapped
 * entries, select the IDMAC in CTRL, enable it (fixed burst) in BMOD,
 * and write the poll-demand register so it begins fetching descriptors.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* order CTRL update before enabling the IDMAC */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
464
/*
 * One-time IDMAC setup: forward-link one page worth of descriptors
 * into a ring (the last entry's ER bit and des3 close the ring back to
 * the base), soft-reset the IDMAC, clear and unmask only the TX/RX
 * complete interrupts, and program the descriptor base address.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}
492
/* dw_mci DMA operations backed by the internal DMA controller */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
500#endif /* CONFIG_MMC_DW_IDMAC */
501
/*
 * DMA-map @data's scatterlist.  Returns the number of mapped entries,
 * or -EINVAL when the transfer is unsuitable for DMA (below the size
 * threshold, or not 32-bit aligned in block size/offsets/lengths).
 * With @next set (pre_req path) the result is cached in
 * data->host_cookie so the later submit can reuse the mapping.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	/* reuse a mapping created earlier by pre_req */
	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
540
/*
 * mmc_host_ops.pre_req: pre-map the next request's data for DMA so the
 * mapping cost overlaps the current transfer.  On failure host_cookie
 * is left 0 and mapping happens at submit time instead.
 */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* a stale cookie means the mapping was never consumed; drop it */
	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}
559
/*
 * mmc_host_ops.post_req: unmap a scatterlist that pre_req (or the
 * submit path) mapped, once the request has completed.
 */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}
577
/*
 * Program FIFOTH (burst size MSIZE plus RX/TX watermarks) to suit
 * @data's block size when the internal DMAC is in use.  The TX
 * watermark is half the FIFO; MSIZE and the RX watermark are the
 * largest burst that evenly divides both the block depth and the
 * remaining TX FIFO space, falling back to single transfers when the
 * block size is not a multiple of the FIFO width.
 *
 * Cleanup: use ARRAY_SIZE() instead of the open-coded
 * sizeof(a)/sizeof(a[0]); fix a comment typo.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
618
/*
 * Program the card-read-threshold register (CDTHRCTL).  The threshold
 * is enabled only for HS200/SDR104 timing and only when one block fits
 * entirely in the FIFO; otherwise the feature is disabled.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
649
/*
 * Try to hand @data to the DMA engine.  Returns 0 on success or a
 * negative error, in which case the caller falls back to PIO.  Also
 * re-tunes FIFOTH when the block size changed, enables the DMA
 * interface and masks the RX/TX PIO interrupts.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
696
/*
 * Prepare the controller for @data: record the transfer direction, set
 * the read threshold for reads, then either start DMA or fall back to
 * PIO (sg_miter iteration driven by the RX/TX FIFO interrupts).
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		/* DMA unavailable or unsuitable: drive the FIFO by PIO */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
751
/*
 * Synchronously issue a bare controller command (e.g. a clock-update
 * command during dw_mci_setup_bus) and busy-wait up to 500ms for the
 * CIU to clear the start bit; logs an error on timeout.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	/* order the argument write before setting the start bit */
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
771
/*
 * Program the card clock (divider, enable, optional low-power gating)
 * and the bus width for @slot.  The clock is only reprogrammed when
 * the requested rate differs from the current one, unless
 * @force_clkinit.  Each clock register change is propagated to the
 * card interface unit with an update-clock command.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		/* clock == 0: just gate the card clock */
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		/* CLKDIV divides by 2*div; 0 means bypass (full bus_hz) */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		/* log only when the effective rate actually changes */
		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* keep the clock with reflecting clock divider */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
835
/*
 * Begin executing @cmd of slot->mrq on the hardware: reset per-request
 * state, program timeout/byte-count/block-size for data commands,
 * submit the data phase, and issue the command (with the one-time init
 * sequence for a freshly inserted card).  Also precomputes the CMD
 * value for the request's stop/abort command.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		/* maximum hardware timeout; software supervises instead */
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		/* data setup must be visible before the command starts */
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
880
/*
 * Start a queued request, issuing the set-block-count (sbc) command
 * first when the request carries one.
 */
static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}
890
/* must be called with host->lock held */
/*
 * Start @mrq immediately when the controller is idle; otherwise queue
 * the slot until the current request finishes.
 */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}
907
908static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
909{
910 struct dw_mci_slot *slot = mmc_priv(mmc);
911 struct dw_mci *host = slot->host;
912
913 WARN_ON(slot->mrq);
914
James Hogan7456caa2011-06-24 13:55:10 +0100915 /*
916 * The check for card presence and queueing of the request must be
917 * atomic, otherwise the card could be removed in between and the
918 * request wouldn't fail until another card was inserted.
919 */
920 spin_lock_bh(&host->lock);
921
Will Newtonf95f3852011-01-02 01:11:59 -0500922 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
James Hogan7456caa2011-06-24 13:55:10 +0100923 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -0500924 mrq->cmd->error = -ENOMEDIUM;
925 mmc_request_done(mmc, mrq);
926 return;
927 }
928
Will Newtonf95f3852011-01-02 01:11:59 -0500929 dw_mci_queue_request(host, slot, mrq);
James Hogan7456caa2011-06-24 13:55:10 +0100930
931 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -0500932}
933
/*
 * mmc_host_ops .set_ios hook: apply bus width, timing, clock and power
 * settings requested by the MMC core.
 *
 * Ordering within this function follows the hardware's expectations:
 * bus width -> UHS/DDR register -> clock mirror -> variant callback ->
 * dw_mci_setup_bus() -> regulators/PWREN.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	/* Record the requested bus width; programmed into CTYPE later */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set: per-slot DDR bits live in UHS_REG[31:16] */
	if (ios->timing == MMC_TIMING_MMC_DDR52)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	/* Give the platform variant a chance to tweak settings */
	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/*return, if failed turn on vmmc*/
				return;
			}
		}
		/* vqmmc_enabled guards against unbalanced regulator refcounts */
		if (!IS_ERR(mmc->supply.vqmmc) && !slot->host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(slot->host->dev,
					"failed to enable vqmmc regulator\n");
			else
				slot->host->vqmmc_enabled = true;
		}
		/* Next command will carry the 80-clock init sequence */
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			slot->host->vqmmc_enabled = false;
		}

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}
1018
1019static int dw_mci_get_ro(struct mmc_host *mmc)
1020{
1021 int read_only;
1022 struct dw_mci_slot *slot = mmc_priv(mmc);
Jaehoon Chung9795a842014-03-03 11:36:46 +09001023 int gpio_ro = mmc_gpio_get_ro(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -05001024
1025 /* Use platform get_ro function, else try on board write protect */
Jaehoon Chung26375b52014-08-07 16:37:58 +09001026 if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
1027 (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
Thomas Abrahamb4967aa2012-09-17 18:16:39 +00001028 read_only = 0;
Jaehoon Chung9795a842014-03-03 11:36:46 +09001029 else if (!IS_ERR_VALUE(gpio_ro))
1030 read_only = gpio_ro;
Will Newtonf95f3852011-01-02 01:11:59 -05001031 else
1032 read_only =
1033 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1034
1035 dev_dbg(&mmc->class_dev, "card is %s\n",
1036 read_only ? "read-only" : "read-write");
1037
1038 return read_only;
1039}
1040
1041static int dw_mci_get_cd(struct mmc_host *mmc)
1042{
1043 int present;
1044 struct dw_mci_slot *slot = mmc_priv(mmc);
1045 struct dw_mci_board *brd = slot->host->pdata;
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001046 struct dw_mci *host = slot->host;
1047 int gpio_cd = mmc_gpio_get_cd(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -05001048
1049 /* Use platform get_cd function, else try onboard card detect */
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09001050 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1051 present = 1;
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001052 else if (!IS_ERR_VALUE(gpio_cd))
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001053 present = gpio_cd;
Will Newtonf95f3852011-01-02 01:11:59 -05001054 else
1055 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1056 == 0 ? 1 : 0;
1057
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001058 spin_lock_bh(&host->lock);
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001059 if (present) {
1060 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
Will Newtonf95f3852011-01-02 01:11:59 -05001061 dev_dbg(&mmc->class_dev, "card is present\n");
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001062 } else {
1063 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
Will Newtonf95f3852011-01-02 01:11:59 -05001064 dev_dbg(&mmc->class_dev, "card is not present\n");
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001065 }
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001066 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -05001067
1068 return present;
1069}
1070
Doug Anderson9623b5b2012-07-25 08:33:17 -07001071/*
1072 * Disable lower power mode.
1073 *
1074 * Low power mode will stop the card clock when idle. According to the
1075 * description of the CLKENA register we should disable low power mode
1076 * for SDIO cards if we need SDIO interrupts to work.
1077 *
1078 * This function is fast if low power mode is already disabled.
1079 */
1080static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1081{
1082 struct dw_mci *host = slot->host;
1083 u32 clk_en_a;
1084 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1085
1086 clk_en_a = mci_readl(host, CLKENA);
1087
1088 if (clk_en_a & clken_low_pwr) {
1089 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1090 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1091 SDMMC_CMD_PRV_DAT_WAIT, 0);
1092 }
1093}
1094
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301095static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1096{
1097 struct dw_mci_slot *slot = mmc_priv(mmc);
1098 struct dw_mci *host = slot->host;
1099 u32 int_mask;
1100
1101 /* Enable/disable Slot Specific SDIO interrupt */
1102 int_mask = mci_readl(host, INTMASK);
1103 if (enb) {
Doug Anderson9623b5b2012-07-25 08:33:17 -07001104 /*
1105 * Turn off low power mode if it was enabled. This is a bit of
1106 * a heavy operation and we disable / enable IRQs a lot, so
1107 * we'll leave low power mode disabled and it will get
1108 * re-enabled again in dw_mci_setup_bus().
1109 */
1110 dw_mci_disable_low_power(slot);
1111
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301112 mci_writel(host, INTMASK,
Kyoungil Kim705ad042012-05-14 17:38:48 +09001113 (int_mask | SDMMC_INT_SDIO(slot->id)));
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301114 } else {
1115 mci_writel(host, INTMASK,
Kyoungil Kim705ad042012-05-14 17:38:48 +09001116 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301117 }
1118}
1119
Seungwon Jeon0976f162013-08-31 00:12:42 +09001120static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1121{
1122 struct dw_mci_slot *slot = mmc_priv(mmc);
1123 struct dw_mci *host = slot->host;
1124 const struct dw_mci_drv_data *drv_data = host->drv_data;
1125 struct dw_mci_tuning_data tuning_data;
1126 int err = -ENOSYS;
1127
1128 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1129 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1130 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1131 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1132 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1133 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1134 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1135 } else {
1136 return -EINVAL;
1137 }
1138 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1139 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1140 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1141 } else {
1142 dev_err(host->dev,
1143 "Undefined command(%d) for tuning\n", opcode);
1144 return -EINVAL;
1145 }
1146
1147 if (drv_data && drv_data->execute_tuning)
1148 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1149 return err;
1150}
1151
/* Host operations registered with the MMC core for every dw_mmc slot. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
};
1162
/*
 * Complete @mrq and, if other slots are queued, start the next request.
 *
 * Called with host->lock held (see __releases/__acquires). The lock is
 * dropped around mmc_request_done() because the core may synchronously
 * submit a new request, which would otherwise deadlock on host->lock.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	/* Grab the mmc_host before cur_slot can change under us */
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Another slot is waiting: dequeue and start it now */
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1191
Seungwon Jeone352c812013-08-31 00:14:17 +09001192static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
Will Newtonf95f3852011-01-02 01:11:59 -05001193{
1194 u32 status = host->cmd_status;
1195
1196 host->cmd_status = 0;
1197
1198 /* Read the response from the card (up to 16 bytes) */
1199 if (cmd->flags & MMC_RSP_PRESENT) {
1200 if (cmd->flags & MMC_RSP_136) {
1201 cmd->resp[3] = mci_readl(host, RESP0);
1202 cmd->resp[2] = mci_readl(host, RESP1);
1203 cmd->resp[1] = mci_readl(host, RESP2);
1204 cmd->resp[0] = mci_readl(host, RESP3);
1205 } else {
1206 cmd->resp[0] = mci_readl(host, RESP0);
1207 cmd->resp[1] = 0;
1208 cmd->resp[2] = 0;
1209 cmd->resp[3] = 0;
1210 }
1211 }
1212
1213 if (status & SDMMC_INT_RTO)
1214 cmd->error = -ETIMEDOUT;
1215 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1216 cmd->error = -EILSEQ;
1217 else if (status & SDMMC_INT_RESP_ERR)
1218 cmd->error = -EIO;
1219 else
1220 cmd->error = 0;
1221
1222 if (cmd->error) {
1223 /* newer ip versions need a delay between retries */
1224 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1225 mdelay(20);
Will Newtonf95f3852011-01-02 01:11:59 -05001226 }
Seungwon Jeone352c812013-08-31 00:14:17 +09001227
1228 return cmd->error;
1229}
1230
/*
 * Translate the raw DATA interrupt status into data->error and fill in
 * bytes_xfered. On any data error the controller is reset to flush
 * stale bytes out of the FIFO. Returns data->error (0 on success).
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			/* data read timeout */
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			/* data CRC error */
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			/* End-bit error: meaning depends on direction */
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
1273
/*
 * Bottom-half state machine that drives a request through its phases:
 *
 *   SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP -> done,
 *   with DATA_ERROR as a detour back into DATA_BUSY.
 *
 * The interrupt handler posts events into host->pending_events; this
 * tasklet consumes them and advances host->state, looping until the
 * state stops changing. Runs under host->lock (dropped temporarily
 * inside dw_mci_request_end()).
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 done: now send the actual data command */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/* Command failed before its data phase: abort */
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed. This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					/* CMD23 transfers need no CMD12 status */
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the aborted transfer to wind down */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
1456
James Hogan34b664a2011-06-24 13:57:56 +01001457/* push final bytes to part_buf, only use during push */
1458static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1459{
1460 memcpy((void *)&host->part_buf, buf, cnt);
1461 host->part_buf_count = cnt;
1462}
1463
1464/* append bytes to part_buf, only use during push */
1465static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1466{
1467 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1468 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1469 host->part_buf_count += cnt;
1470 return cnt;
1471}
1472
1473/* pull first bytes from part_buf, only use during pull */
1474static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1475{
1476 cnt = min(cnt, (int)host->part_buf_count);
1477 if (cnt) {
1478 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1479 cnt);
1480 host->part_buf_count -= cnt;
1481 host->part_buf_start += cnt;
1482 }
1483 return cnt;
1484}
1485
1486/* pull final bytes from the part_buf, assuming it's just been filled */
1487static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1488{
1489 memcpy(buf, &host->part_buf, cnt);
1490 host->part_buf_start = cnt;
1491 host->part_buf_count = (1 << host->data_shift) - cnt;
1492}
1493
/*
 * PIO push for a 16-bit-wide data FIFO: write @cnt bytes from @buf,
 * buffering any sub-word remainder in part_buf. The remainder is
 * flushed early if this call reaches the transfer's expected length.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			/* part_buf now holds a complete 16-bit word */
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through an aligned buffer when @buf is misaligned */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}
1544
/*
 * PIO pull for a 16-bit-wide data FIFO: read @cnt bytes into @buf.
 * A trailing odd byte is read as a full word and the unused half is
 * kept in part_buf for the next call.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through an aligned buffer when @buf is misaligned */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Read one more word; leftover bytes stay in part_buf */
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1576
/*
 * PIO push for a 32-bit-wide data FIFO: write @cnt bytes from @buf,
 * buffering any sub-word remainder in part_buf. The remainder is
 * flushed early if this call reaches the transfer's expected length.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			/* part_buf now holds a complete 32-bit word */
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through an aligned buffer when @buf is misaligned */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}
1627
/*
 * PIO pull for a 32-bit-wide data FIFO: read @cnt bytes into @buf.
 * A trailing partial word is read in full and the unused bytes are
 * kept in part_buf for the next call.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through an aligned buffer when @buf is misaligned */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Read one more word; leftover bytes stay in part_buf */
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1659
/*
 * PIO push for a 64-bit-wide data FIFO: write @cnt bytes from @buf,
 * buffering any sub-word remainder in part_buf. The remainder is
 * flushed early if this call reaches the transfer's expected length.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			/* part_buf now holds a complete 64-bit word */
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through an aligned buffer when @buf is misaligned */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}
1711
/*
 * PIO pull for a 64-bit-wide data FIFO: read @cnt bytes into @buf.
 * A trailing partial word is read in full and the unused bytes are
 * kept in part_buf for the next call.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through an aligned buffer when @buf is misaligned */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Read one more word; leftover bytes stay in part_buf */
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1743
1744static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1745{
1746 int len;
1747
1748 /* get remaining partial bytes */
1749 len = dw_mci_pull_part_bytes(host, buf, cnt);
1750 if (unlikely(len == cnt))
1751 return;
1752 buf += len;
1753 cnt -= len;
1754
1755 /* get the rest of the data */
1756 host->pull_data(host, buf, cnt);
Will Newtonf95f3852011-01-02 01:11:59 -05001757}
1758
/*
 * PIO receive path: drain the data FIFO into the request's scatterlist.
 *
 * @dto: true when called for the Data Transfer Over interrupt, in which
 *       case we keep draining while the FIFO still reports queued words.
 *
 * Loops over scatterlist segments via the host's sg_mapping_iter, copying
 * as much as the FIFO currently holds into each segment.  On completion
 * (or when the scatterlist is exhausted) EVENT_XFER_COMPLETE is posted
 * for the tasklet.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/*
			 * Bytes currently available = FIFO word count scaled
			 * by the FIFO width, plus any staged partial bytes.
			 */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		/* Read status before acking RXDR so we don't miss a refill. */
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		/* Segment fully consumed: advance so next IRQ resumes cleanly. */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* Publish host->sg = NULL before the event bit becomes visible. */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1812
/*
 * PIO transmit path: feed the data FIFO from the request's scatterlist.
 *
 * Mirror image of dw_mci_read_data_pio(): for each scatterlist segment,
 * push as many bytes as the FIFO has room for (fifo_depth minus what it
 * already holds, minus staged partial bytes), re-checking TXDR so we keep
 * filling while the controller keeps asking.  EVENT_XFER_COMPLETE is
 * posted once the scatterlist is exhausted.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* Free FIFO space in bytes, less staged partial bytes. */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		/* Read status before acking TXDR so a new request isn't lost. */
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		/* Segment fully consumed: advance so next IRQ resumes cleanly. */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* Publish host->sg = NULL before the event bit becomes visible. */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1866
/*
 * Record a command-done interrupt and kick the tasklet.
 *
 * The first interrupt status wins: if an error flag already stored a
 * status, the CMD_DONE status does not overwrite it.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	/* Make cmd_status visible before EVENT_CMD_COMPLETE can be seen. */
	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1877
/*
 * Top-level interrupt handler.
 *
 * Reads the masked interrupt status, acknowledges each source by writing
 * its bit back to RINTSTS (write-one-to-clear), and dispatches: command
 * and data errors and completions to the tasklet, RXDR/TXDR to the PIO
 * paths, card-detect to the card workqueue, and SDIO interrupts to the
 * mmc core.  IDMAC interrupts are handled separately via IDSTS.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		/* Synthesize DATA_OVER when the FIFO count says data is queued. */
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			/* Order cmd_status before the event bit for the tasklet. */
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			/* Don't clobber a data_status set by an error above. */
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* Drain whatever the FIFO still holds (dto=true). */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			/* Card insert/remove is handled in process context. */
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
1971
/*
 * Workqueue handler for card insert/remove events.
 *
 * For every slot whose detect state changed: under host->lock, record the
 * new state, fail any in-flight or queued request with -ENOMEDIUM
 * according to the host state machine, reset the controller on removal,
 * and finally notify the mmc core via mmc_detect_change().  The loop
 * re-reads card presence until it matches the recorded state, so rapid
 * insert/remove bounces are all observed.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/* Request is active on the controller. */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* Request was only queued: fail it directly. */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					/*
					 * mmc_request_done() may re-enter the
					 * driver; drop the lock around it.
					 */
					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0)
				dw_mci_reset(host);

			spin_unlock_bh(&host->lock);

			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
2051
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002052#ifdef CONFIG_OF
2053/* given a slot id, find out the device node representing that slot */
2054static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2055{
2056 struct device_node *np;
2057 const __be32 *addr;
2058 int len;
2059
2060 if (!dev || !dev->of_node)
2061 return NULL;
2062
2063 for_each_child_of_node(dev->of_node, np) {
2064 addr = of_get_property(np, "reg", &len);
2065 if (!addr || (len < sizeof(int)))
2066 continue;
2067 if (be32_to_cpup(addr) == slot)
2068 return np;
2069 }
2070 return NULL;
2071}
2072
/* Map of deprecated per-slot DT boolean properties to slot quirk flags. */
static struct dw_mci_of_slot_quirks {
	char *quirk;	/* device-tree property name */
	int id;		/* corresponding DW_MCI_SLOT_QUIRK_* flag */
} of_slot_quirks[] = {
	{
		.quirk = "disable-wp",
		.id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};
2082
2083static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2084{
2085 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2086 int quirks = 0;
2087 int idx;
2088
2089 /* get quirks */
2090 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
Jaehoon Chung26375b52014-08-07 16:37:58 +09002091 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
2092 dev_warn(dev, "Slot quirk %s is deprecated\n",
2093 of_slot_quirks[idx].quirk);
Doug Andersona70aaa62013-01-11 17:03:50 +00002094 quirks |= of_slot_quirks[idx].id;
Jaehoon Chung26375b52014-08-07 16:37:58 +09002095 }
Doug Andersona70aaa62013-01-11 17:03:50 +00002096
2097 return quirks;
2098}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002099#else /* CONFIG_OF */
/* !CONFIG_OF stub: without device tree there are no per-slot quirks. */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
/* !CONFIG_OF stub: without device tree there is no slot node to find. */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
2108#endif /* CONFIG_OF */
2109
Jaehoon Chung36c179a2012-08-23 20:31:48 +09002110static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
Will Newtonf95f3852011-01-02 01:11:59 -05002111{
2112 struct mmc_host *mmc;
2113 struct dw_mci_slot *slot;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002114 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002115 int ctrl_id, ret;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002116 u32 freq[2];
Will Newtonf95f3852011-01-02 01:11:59 -05002117
Thomas Abraham4a909202012-09-17 18:16:35 +00002118 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
Will Newtonf95f3852011-01-02 01:11:59 -05002119 if (!mmc)
2120 return -ENOMEM;
2121
2122 slot = mmc_priv(mmc);
2123 slot->id = id;
2124 slot->mmc = mmc;
2125 slot->host = host;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002126 host->slot[id] = slot;
Will Newtonf95f3852011-01-02 01:11:59 -05002127
Doug Andersona70aaa62013-01-11 17:03:50 +00002128 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2129
Will Newtonf95f3852011-01-02 01:11:59 -05002130 mmc->ops = &dw_mci_ops;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002131 if (of_property_read_u32_array(host->dev->of_node,
2132 "clock-freq-min-max", freq, 2)) {
2133 mmc->f_min = DW_MCI_FREQ_MIN;
2134 mmc->f_max = DW_MCI_FREQ_MAX;
2135 } else {
2136 mmc->f_min = freq[0];
2137 mmc->f_max = freq[1];
2138 }
Will Newtonf95f3852011-01-02 01:11:59 -05002139
Yuvaraj CD51da2242014-08-22 19:17:50 +05302140 /*if there are external regulators, get them*/
2141 ret = mmc_regulator_get_supply(mmc);
2142 if (ret == -EPROBE_DEFER)
2143 goto err_setup_bus;
2144
2145 if (!mmc->ocr_avail)
2146 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
Will Newtonf95f3852011-01-02 01:11:59 -05002147
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002148 if (host->pdata->caps)
2149 mmc->caps = host->pdata->caps;
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002150
Abhilash Kesavanab269122012-11-19 10:26:21 +05302151 if (host->pdata->pm_caps)
2152 mmc->pm_caps = host->pdata->pm_caps;
2153
Thomas Abraham800d78b2012-09-17 18:16:42 +00002154 if (host->dev->of_node) {
2155 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2156 if (ctrl_id < 0)
2157 ctrl_id = 0;
2158 } else {
2159 ctrl_id = to_platform_device(host->dev)->id;
2160 }
James Hogancb27a842012-10-16 09:43:08 +01002161 if (drv_data && drv_data->caps)
2162 mmc->caps |= drv_data->caps[ctrl_id];
Thomas Abraham800d78b2012-09-17 18:16:42 +00002163
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002164 if (host->pdata->caps2)
2165 mmc->caps2 = host->pdata->caps2;
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002166
Jaehoon Chungd8a4fb02014-03-03 11:36:41 +09002167 mmc_of_parse(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -05002168
Will Newtonf95f3852011-01-02 01:11:59 -05002169 if (host->pdata->blk_settings) {
2170 mmc->max_segs = host->pdata->blk_settings->max_segs;
2171 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2172 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2173 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2174 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2175 } else {
2176 /* Useful defaults if platform data is unset. */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002177#ifdef CONFIG_MMC_DW_IDMAC
2178 mmc->max_segs = host->ring_size;
2179 mmc->max_blk_size = 65536;
2180 mmc->max_blk_count = host->ring_size;
2181 mmc->max_seg_size = 0x1000;
2182 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2183#else
Will Newtonf95f3852011-01-02 01:11:59 -05002184 mmc->max_segs = 64;
2185 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2186 mmc->max_blk_count = 512;
2187 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2188 mmc->max_seg_size = mmc->max_req_size;
Will Newtonf95f3852011-01-02 01:11:59 -05002189#endif /* CONFIG_MMC_DW_IDMAC */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002190 }
Will Newtonf95f3852011-01-02 01:11:59 -05002191
Jaehoon Chungae0eb342014-03-03 11:36:48 +09002192 if (dw_mci_get_cd(mmc))
2193 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2194 else
2195 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2196
Jaehoon Chung0cea5292013-02-15 23:45:45 +09002197 ret = mmc_add_host(mmc);
2198 if (ret)
2199 goto err_setup_bus;
Will Newtonf95f3852011-01-02 01:11:59 -05002200
2201#if defined(CONFIG_DEBUG_FS)
2202 dw_mci_init_debugfs(slot);
2203#endif
2204
2205 /* Card initially undetected */
2206 slot->last_detect_state = 0;
2207
Will Newtonf95f3852011-01-02 01:11:59 -05002208 return 0;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002209
2210err_setup_bus:
2211 mmc_free_host(mmc);
Yuvaraj CD51da2242014-08-22 19:17:50 +05302212 return ret;
Will Newtonf95f3852011-01-02 01:11:59 -05002213}
2214
2215static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2216{
Will Newtonf95f3852011-01-02 01:11:59 -05002217 /* Debugfs stuff is cleaned up by mmc core */
2218 mmc_remove_host(slot->mmc);
2219 slot->host->slot[id] = NULL;
2220 mmc_free_host(slot->mmc);
2221}
2222
2223static void dw_mci_init_dma(struct dw_mci *host)
2224{
2225 /* Alloc memory for sg translation */
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002226 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
Will Newtonf95f3852011-01-02 01:11:59 -05002227 &host->sg_dma, GFP_KERNEL);
2228 if (!host->sg_cpu) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002229 dev_err(host->dev, "%s: could not alloc DMA memory\n",
Will Newtonf95f3852011-01-02 01:11:59 -05002230 __func__);
2231 goto no_dma;
2232 }
2233
2234 /* Determine which DMA interface to use */
2235#ifdef CONFIG_MMC_DW_IDMAC
2236 host->dma_ops = &dw_mci_idmac_ops;
Seungwon Jeon00956ea2012-09-28 19:13:11 +09002237 dev_info(host->dev, "Using internal DMA controller.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002238#endif
2239
2240 if (!host->dma_ops)
2241 goto no_dma;
2242
Jaehoon Chunge1631f92012-04-18 15:42:31 +09002243 if (host->dma_ops->init && host->dma_ops->start &&
2244 host->dma_ops->stop && host->dma_ops->cleanup) {
Will Newtonf95f3852011-01-02 01:11:59 -05002245 if (host->dma_ops->init(host)) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002246 dev_err(host->dev, "%s: Unable to initialize "
Will Newtonf95f3852011-01-02 01:11:59 -05002247 "DMA Controller.\n", __func__);
2248 goto no_dma;
2249 }
2250 } else {
Thomas Abraham4a909202012-09-17 18:16:35 +00002251 dev_err(host->dev, "DMA initialization not found.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002252 goto no_dma;
2253 }
2254
2255 host->use_dma = 1;
2256 return;
2257
2258no_dma:
Thomas Abraham4a909202012-09-17 18:16:35 +00002259 dev_info(host->dev, "Using PIO mode.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002260 host->use_dma = 0;
2261 return;
2262}
2263
Seungwon Jeon31bff452013-08-31 00:14:23 +09002264static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
Will Newtonf95f3852011-01-02 01:11:59 -05002265{
2266 unsigned long timeout = jiffies + msecs_to_jiffies(500);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002267 u32 ctrl;
Will Newtonf95f3852011-01-02 01:11:59 -05002268
Seungwon Jeon31bff452013-08-31 00:14:23 +09002269 ctrl = mci_readl(host, CTRL);
2270 ctrl |= reset;
2271 mci_writel(host, CTRL, ctrl);
Will Newtonf95f3852011-01-02 01:11:59 -05002272
2273 /* wait till resets clear */
2274 do {
2275 ctrl = mci_readl(host, CTRL);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002276 if (!(ctrl & reset))
Will Newtonf95f3852011-01-02 01:11:59 -05002277 return true;
2278 } while (time_before(jiffies, timeout));
2279
Seungwon Jeon31bff452013-08-31 00:14:23 +09002280 dev_err(host->dev,
2281 "Timeout resetting block (ctrl reset %#x)\n",
2282 ctrl & reset);
Will Newtonf95f3852011-01-02 01:11:59 -05002283
2284 return false;
2285}
2286
/*
 * Perform a full controller reset (CIU + FIFO, plus DMA when in use).
 *
 * Follows the databook-style sequence: assert the reset bits, clear raw
 * interrupt status, wait for any outstanding DMA request to drain, reset
 * the FIFO once more, and reset/reprogram the IDMAC when built in.
 * Regardless of outcome, a clock-update command is issued at the end so
 * the CIU reloads its clock registers.  Returns true on full success.
 */
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;
			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to "
					"clear during reset\n", __func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev, "%s: fifo/dma reset bits didn't "
				"clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
	/* It is also recommended that we reset and reprogram idmac */
	dw_mci_idmac_reset(host);
#endif

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
2356
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002357#ifdef CONFIG_OF
/* Map of host-level DT boolean properties to DW_MCI_QUIRK_* flags. */
static struct dw_mci_of_quirks {
	char *quirk;	/* device-tree property name */
	int id;		/* corresponding quirk flag */
} of_quirks[] = {
	{
		.quirk = "broken-cd",
		.id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	}, {
		.quirk = "disable-wp",
		.id = DW_MCI_QUIRK_NO_WRITE_PROTECT,
	},
};
2370
/*
 * Build a dw_mci_board platform-data structure from the device tree.
 *
 * Reads slot count, quirk properties, FIFO depth, card-detect delay and
 * bus clock frequency, then gives the controller-specific drv_data a
 * chance to parse its own bindings.  Returns the devm-allocated pdata or
 * an ERR_PTR() on failure.
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				&pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, "
			"assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using "
				"value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* let the controller-specific glue parse its own properties */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	return pdata;
}
2419
2420#else /* CONFIG_OF */
/* !CONFIG_OF stub: no device tree means no pdata can be derived from it. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
2425#endif /* CONFIG_OF */
2426
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302427int dw_mci_probe(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002428{
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002429 const struct dw_mci_drv_data *drv_data = host->drv_data;
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302430 int width, i, ret = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002431 u32 fifo_size;
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002432 int init_slots = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002433
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002434 if (!host->pdata) {
2435 host->pdata = dw_mci_parse_dt(host);
2436 if (IS_ERR(host->pdata)) {
2437 dev_err(host->dev, "platform data not available\n");
2438 return -EINVAL;
2439 }
Will Newtonf95f3852011-01-02 01:11:59 -05002440 }
2441
Jaehoon Chung907abd52014-03-03 11:36:43 +09002442 if (host->pdata->num_slots > 1) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002443 dev_err(host->dev,
Jaehoon Chung907abd52014-03-03 11:36:43 +09002444 "Platform data must supply num_slots.\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302445 return -ENODEV;
Will Newtonf95f3852011-01-02 01:11:59 -05002446 }
2447
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002448 host->biu_clk = devm_clk_get(host->dev, "biu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002449 if (IS_ERR(host->biu_clk)) {
2450 dev_dbg(host->dev, "biu clock not available\n");
2451 } else {
2452 ret = clk_prepare_enable(host->biu_clk);
2453 if (ret) {
2454 dev_err(host->dev, "failed to enable biu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002455 return ret;
2456 }
Will Newtonf95f3852011-01-02 01:11:59 -05002457 }
2458
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002459 host->ciu_clk = devm_clk_get(host->dev, "ciu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002460 if (IS_ERR(host->ciu_clk)) {
2461 dev_dbg(host->dev, "ciu clock not available\n");
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002462 host->bus_hz = host->pdata->bus_hz;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002463 } else {
2464 ret = clk_prepare_enable(host->ciu_clk);
2465 if (ret) {
2466 dev_err(host->dev, "failed to enable ciu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002467 goto err_clk_biu;
2468 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002469
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002470 if (host->pdata->bus_hz) {
2471 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2472 if (ret)
2473 dev_warn(host->dev,
Jaehoon Chung612de4c2014-03-03 11:36:42 +09002474 "Unable to set bus rate to %uHz\n",
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002475 host->pdata->bus_hz);
2476 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002477 host->bus_hz = clk_get_rate(host->ciu_clk);
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002478 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002479
Jaehoon Chung612de4c2014-03-03 11:36:42 +09002480 if (!host->bus_hz) {
2481 dev_err(host->dev,
2482 "Platform data must supply bus speed\n");
2483 ret = -ENODEV;
2484 goto err_clk_ciu;
2485 }
2486
Yuvaraj Kumar C D002f0d52013-08-31 00:12:19 +09002487 if (drv_data && drv_data->init) {
2488 ret = drv_data->init(host);
2489 if (ret) {
2490 dev_err(host->dev,
2491 "implementation specific init failed\n");
2492 goto err_clk_ciu;
2493 }
2494 }
2495
James Hogancb27a842012-10-16 09:43:08 +01002496 if (drv_data && drv_data->setup_clock) {
2497 ret = drv_data->setup_clock(host);
Thomas Abraham800d78b2012-09-17 18:16:42 +00002498 if (ret) {
2499 dev_err(host->dev,
2500 "implementation specific clock setup failed\n");
2501 goto err_clk_ciu;
2502 }
2503 }
2504
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302505 host->quirks = host->pdata->quirks;
Will Newtonf95f3852011-01-02 01:11:59 -05002506
2507 spin_lock_init(&host->lock);
2508 INIT_LIST_HEAD(&host->queue);
2509
Will Newtonf95f3852011-01-02 01:11:59 -05002510 /*
2511 * Get the host data width - this assumes that HCON has been set with
2512 * the correct values.
2513 */
2514 i = (mci_readl(host, HCON) >> 7) & 0x7;
2515 if (!i) {
2516 host->push_data = dw_mci_push_data16;
2517 host->pull_data = dw_mci_pull_data16;
2518 width = 16;
2519 host->data_shift = 1;
2520 } else if (i == 2) {
2521 host->push_data = dw_mci_push_data64;
2522 host->pull_data = dw_mci_pull_data64;
2523 width = 64;
2524 host->data_shift = 3;
2525 } else {
2526 /* Check for a reserved value, and warn if it is */
2527 WARN((i != 1),
2528 "HCON reports a reserved host data width!\n"
2529 "Defaulting to 32-bit access.\n");
2530 host->push_data = dw_mci_push_data32;
2531 host->pull_data = dw_mci_pull_data32;
2532 width = 32;
2533 host->data_shift = 2;
2534 }
2535
2536 /* Reset all blocks */
Sonny Rao3a33a942014-08-04 18:19:50 -07002537 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
Seungwon Jeon141a7122012-05-22 13:01:03 +09002538 return -ENODEV;
2539
2540 host->dma_ops = host->pdata->dma_ops;
2541 dw_mci_init_dma(host);
Will Newtonf95f3852011-01-02 01:11:59 -05002542
2543 /* Clear the interrupts for the host controller */
2544 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2545 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2546
2547 /* Put in max timeout */
2548 mci_writel(host, TMOUT, 0xFFFFFFFF);
2549
2550 /*
2551 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2552 * Tx Mark = fifo_size / 2 DMA Size = 8
2553 */
James Hoganb86d8252011-06-24 13:57:18 +01002554 if (!host->pdata->fifo_depth) {
2555 /*
2556 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2557 * have been overwritten by the bootloader, just like we're
2558 * about to do, so if you know the value for your hardware, you
2559 * should put it in the platform data.
2560 */
2561 fifo_size = mci_readl(host, FIFOTH);
Jaehoon Chung8234e862012-01-11 09:28:21 +00002562 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
James Hoganb86d8252011-06-24 13:57:18 +01002563 } else {
2564 fifo_size = host->pdata->fifo_depth;
2565 }
2566 host->fifo_depth = fifo_size;
Seungwon Jeon52426892013-08-31 00:13:42 +09002567 host->fifoth_val =
2568 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002569 mci_writel(host, FIFOTH, host->fifoth_val);
Will Newtonf95f3852011-01-02 01:11:59 -05002570
2571 /* disable clock to CIU */
2572 mci_writel(host, CLKENA, 0);
2573 mci_writel(host, CLKSRC, 0);
2574
James Hogan63008762013-03-12 10:43:54 +00002575 /*
2576 * In 2.40a spec, Data offset is changed.
2577 * Need to check the version-id and set data-offset for DATA register.
2578 */
2579 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2580 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2581
2582 if (host->verid < DW_MMC_240A)
2583 host->data_offset = DATA_OFFSET;
2584 else
2585 host->data_offset = DATA_240A_OFFSET;
2586
Will Newtonf95f3852011-01-02 01:11:59 -05002587 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002588 host->card_workqueue = alloc_workqueue("dw-mci-card",
ZhangZhen59ff3eb2014-03-27 09:41:47 +08002589 WQ_MEM_RECLAIM, 1);
Wei Yongjunef7aef92013-04-19 09:25:45 +08002590 if (!host->card_workqueue) {
2591 ret = -ENOMEM;
James Hogan1791b13e2011-06-24 13:55:55 +01002592 goto err_dmaunmap;
Wei Yongjunef7aef92013-04-19 09:25:45 +08002593 }
James Hogan1791b13e2011-06-24 13:55:55 +01002594 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002595 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2596 host->irq_flags, "dw-mci", host);
Will Newtonf95f3852011-01-02 01:11:59 -05002597 if (ret)
James Hogan1791b13e2011-06-24 13:55:55 +01002598 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002599
Will Newtonf95f3852011-01-02 01:11:59 -05002600 if (host->pdata->num_slots)
2601 host->num_slots = host->pdata->num_slots;
2602 else
2603 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2604
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05302605 /*
2606 * Enable interrupts for command done, data over, data empty, card det,
2607 * receive ready and error such as transmit, receive timeout, crc error
2608 */
2609 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2610 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2611 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2612 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2613 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2614
2615 dev_info(host->dev, "DW MMC controller at irq %d, "
2616 "%d bit host data width, "
2617 "%u deep fifo\n",
2618 host->irq, width, fifo_size);
2619
Will Newtonf95f3852011-01-02 01:11:59 -05002620 /* We need at least one slot to succeed */
2621 for (i = 0; i < host->num_slots; i++) {
2622 ret = dw_mci_init_slot(host, i);
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002623 if (ret)
2624 dev_dbg(host->dev, "slot %d init failed\n", i);
2625 else
2626 init_slots++;
2627 }
2628
2629 if (init_slots) {
2630 dev_info(host->dev, "%d slots initialized\n", init_slots);
2631 } else {
2632 dev_dbg(host->dev, "attempted to initialize %d slots, "
2633 "but failed on all\n", host->num_slots);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002634 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002635 }
2636
Will Newtonf95f3852011-01-02 01:11:59 -05002637 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
Thomas Abraham4a909202012-09-17 18:16:35 +00002638 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002639
2640 return 0;
2641
James Hogan1791b13e2011-06-24 13:55:55 +01002642err_workqueue:
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002643 destroy_workqueue(host->card_workqueue);
James Hogan1791b13e2011-06-24 13:55:55 +01002644
Will Newtonf95f3852011-01-02 01:11:59 -05002645err_dmaunmap:
2646 if (host->use_dma && host->dma_ops->exit)
2647 host->dma_ops->exit(host);
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002648
2649err_clk_ciu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002650 if (!IS_ERR(host->ciu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002651 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002652
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002653err_clk_biu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002654 if (!IS_ERR(host->biu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002655 clk_disable_unprepare(host->biu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002656
Will Newtonf95f3852011-01-02 01:11:59 -05002657 return ret;
2658}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302659EXPORT_SYMBOL(dw_mci_probe);
Will Newtonf95f3852011-01-02 01:11:59 -05002660
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302661void dw_mci_remove(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002662{
Will Newtonf95f3852011-01-02 01:11:59 -05002663 int i;
2664
2665 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2666 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2667
Will Newtonf95f3852011-01-02 01:11:59 -05002668 for (i = 0; i < host->num_slots; i++) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002669 dev_dbg(host->dev, "remove slot %d\n", i);
Will Newtonf95f3852011-01-02 01:11:59 -05002670 if (host->slot[i])
2671 dw_mci_cleanup_slot(host->slot[i], i);
2672 }
2673
2674 /* disable clock to CIU */
2675 mci_writel(host, CLKENA, 0);
2676 mci_writel(host, CLKSRC, 0);
2677
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002678 destroy_workqueue(host->card_workqueue);
Will Newtonf95f3852011-01-02 01:11:59 -05002679
2680 if (host->use_dma && host->dma_ops->exit)
2681 host->dma_ops->exit(host);
2682
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002683 if (!IS_ERR(host->ciu_clk))
2684 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002685
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002686 if (!IS_ERR(host->biu_clk))
2687 clk_disable_unprepare(host->biu_clk);
Will Newtonf95f3852011-01-02 01:11:59 -05002688}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302689EXPORT_SYMBOL(dw_mci_remove);
2690
2691
Will Newtonf95f3852011-01-02 01:11:59 -05002692
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002693#ifdef CONFIG_PM_SLEEP
Will Newtonf95f3852011-01-02 01:11:59 -05002694/*
2695 * TODO: we should probably disable the clock to the card in the suspend path.
2696 */
int dw_mci_suspend(struct dw_mci *host)
{
	/*
	 * Intentionally a no-op: dw_mci_resume() resets the controller and
	 * reprograms every register it needs, so nothing must be saved here.
	 */
	return 0;
}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302701EXPORT_SYMBOL(dw_mci_suspend);
Will Newtonf95f3852011-01-02 01:11:59 -05002702
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302703int dw_mci_resume(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002704{
2705 int i, ret;
Will Newtonf95f3852011-01-02 01:11:59 -05002706
Sonny Rao3a33a942014-08-04 18:19:50 -07002707 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002708 ret = -ENODEV;
2709 return ret;
2710 }
2711
Jonathan Kliegman3bfe6192012-06-14 13:31:55 -04002712 if (host->use_dma && host->dma_ops->init)
Seungwon Jeon141a7122012-05-22 13:01:03 +09002713 host->dma_ops->init(host);
2714
Seungwon Jeon52426892013-08-31 00:13:42 +09002715 /*
2716 * Restore the initial value at FIFOTH register
2717 * And Invalidate the prev_blksz with zero
2718 */
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002719 mci_writel(host, FIFOTH, host->fifoth_val);
Seungwon Jeon52426892013-08-31 00:13:42 +09002720 host->prev_blksz = 0;
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002721
Doug Anderson2eb29442013-08-31 00:11:49 +09002722 /* Put in max timeout */
2723 mci_writel(host, TMOUT, 0xFFFFFFFF);
2724
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002725 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2726 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2727 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2728 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2729 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2730
Will Newtonf95f3852011-01-02 01:11:59 -05002731 for (i = 0; i < host->num_slots; i++) {
2732 struct dw_mci_slot *slot = host->slot[i];
2733 if (!slot)
2734 continue;
Abhilash Kesavanab269122012-11-19 10:26:21 +05302735 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2736 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2737 dw_mci_setup_bus(slot, true);
2738 }
Will Newtonf95f3852011-01-02 01:11:59 -05002739 }
Will Newtonf95f3852011-01-02 01:11:59 -05002740 return 0;
2741}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302742EXPORT_SYMBOL(dw_mci_resume);
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002743#endif /* CONFIG_PM_SLEEP */
2744
/*
 * Module load hook: the core library has no devices of its own to register
 * (platform/PCI glue drivers call dw_mci_probe()), so just announce itself.
 */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}
2750
static void __exit dw_mci_exit(void)
{
	/*
	 * Nothing to release at module unload; per-host cleanup is performed
	 * by dw_mci_remove(), called from the bus glue drivers.
	 */
}
2754
2755module_init(dw_mci_init);
2756module_exit(dw_mci_exit);
2757
2758MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2759MODULE_AUTHOR("NXP Semiconductor VietNam");
2760MODULE_AUTHOR("Imagination Technologies Ltd");
2761MODULE_LICENSE("GPL v2");