blob: 39cf54f479d9b83df157126e0754743d84a5955c [file] [log] [blame]
Will Newtonf95f3852011-01-02 01:11:59 -05001/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
Will Newtonf95f3852011-01-02 01:11:59 -050025#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/stat.h>
28#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/mmc/host.h>
31#include <linux/mmc/mmc.h>
Seungwon Jeon90c21432013-08-31 00:14:05 +090032#include <linux/mmc/sdio.h>
Will Newtonf95f3852011-01-02 01:11:59 -050033#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
Jaehoon Chungc07946a2011-02-25 11:08:14 +090035#include <linux/regulator/consumer.h>
James Hogan1791b13e2011-06-24 13:55:55 +010036#include <linux/workqueue.h>
Thomas Abrahamc91eab42012-09-17 18:16:40 +000037#include <linux/of.h>
Doug Anderson55a6ceb2013-01-11 17:03:53 +000038#include <linux/of_gpio.h>
Zhangfei Gaobf626e52014-01-09 22:35:10 +080039#include <linux/mmc/slot-gpio.h>
Will Newtonf95f3852011-01-02 01:11:59 -050040
41#include "dw_mmc.h"
42
43/* Common flag combinations */
Jaehoon Chung3f7eec62013-05-27 13:47:57 +090044#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
Will Newtonf95f3852011-01-02 01:11:59 -050045 SDMMC_INT_HTO | SDMMC_INT_SBE | \
46 SDMMC_INT_EBE)
47#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
48 SDMMC_INT_RESP_ERR)
49#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
50 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
51#define DW_MCI_SEND_STATUS 1
52#define DW_MCI_RECV_STATUS 2
53#define DW_MCI_DMA_THRESHOLD 16
54
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +090055#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
56#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
57
Will Newtonf95f3852011-01-02 01:11:59 -050058#ifdef CONFIG_MMC_DW_IDMAC
Joonyoung Shimfc79a4d2013-04-26 15:35:22 +090059#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
62 SDMMC_IDMAC_INT_TI)
63
/*
 * Internal DMA controller (IDMAC) hardware descriptor.  Descriptors are
 * kept in a chained ring (des3 of each entry points at the next one);
 * see dw_mci_idmac_init() / dw_mci_translate_sglist().
 */
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* disable interrupt on completion */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of the transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of the transfer */
#define IDMAC_DES0_CH	BIT(4)	/* des3 is a chain pointer, not buffer 2 */
#define IDMAC_DES0_ER	BIT(5)	/* end of descriptor ring */
#define IDMAC_DES0_CES	BIT(30)	/* card error summary (set by hardware) */
#define IDMAC_DES0_OWN	BIT(31)	/* descriptor is owned by the DMA engine */

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
82#endif /* CONFIG_MMC_DW_IDMAC */
83
/*
 * Standard tuning block pattern compared against the data returned by
 * the card during tuning (CMD19/CMD21) on a 4-bit bus.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
Will Newtonf95f3852011-01-02 01:11:59 -050094
/*
 * Standard tuning block pattern for an 8-bit bus (twice the length of
 * the 4-bit pattern), used during CMD21 tuning.
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
113
Sonny Rao3a33a942014-08-04 18:19:50 -0700114static bool dw_mci_reset(struct dw_mci *host);
Seungwon Jeon31bff452013-08-31 00:14:23 +0900115
Will Newtonf95f3852011-01-02 01:11:59 -0500116#if defined(CONFIG_DEBUG_FS)
117static int dw_mci_req_show(struct seq_file *s, void *v)
118{
119 struct dw_mci_slot *slot = s->private;
120 struct mmc_request *mrq;
121 struct mmc_command *cmd;
122 struct mmc_command *stop;
123 struct mmc_data *data;
124
125 /* Make sure we get a consistent snapshot */
126 spin_lock_bh(&slot->host->lock);
127 mrq = slot->mrq;
128
129 if (mrq) {
130 cmd = mrq->cmd;
131 data = mrq->data;
132 stop = mrq->stop;
133
134 if (cmd)
135 seq_printf(s,
136 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
137 cmd->opcode, cmd->arg, cmd->flags,
138 cmd->resp[0], cmd->resp[1], cmd->resp[2],
139 cmd->resp[2], cmd->error);
140 if (data)
141 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
142 data->bytes_xfered, data->blocks,
143 data->blksz, data->flags, data->error);
144 if (stop)
145 seq_printf(s,
146 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
147 stop->opcode, stop->arg, stop->flags,
148 stop->resp[0], stop->resp[1], stop->resp[2],
149 stop->resp[2], stop->error);
150 }
151
152 spin_unlock_bh(&slot->host->lock);
153
154 return 0;
155}
156
/* debugfs open: bind the slot stashed in the inode to dw_mci_req_show() */
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}
161
/* File operations for the per-slot debugfs "req" entry */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
169
170static int dw_mci_regs_show(struct seq_file *s, void *v)
171{
172 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
173 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
174 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
175 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
176 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
177 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
178
179 return 0;
180}
181
/* debugfs open: bind the host stashed in the inode to dw_mci_regs_show() */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}
186
/* File operations for the per-host debugfs "regs" entry */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
194
/*
 * Create the per-slot debugfs entries ("regs", "req", "state",
 * "pending_events", "completed_events") under the MMC core's debugfs
 * root for this host.  Best effort: on any failure a single error is
 * logged and the remaining entries are skipped.
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	/* The MMC core creates the root; nothing to do if it didn't */
	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
235#endif /* defined(CONFIG_DEBUG_FS) */
236
/*
 * Translate an mmc_command into the SDMMC_CMD register encoding.
 *
 * Marks abort-class commands (STOP, GO_IDLE/GO_INACTIVE, CMD52 writes to
 * the CCCR abort register) with SDMMC_CMD_STOP, and makes every other
 * command except SEND_STATUS wait for a previous data transfer to finish.
 * Gives the platform driver a chance to tweak the result via its
 * prepare_command hook.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;
	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	/* CMD52 arg bits 9..25 carry the register address; 0x06 is CCCR abort */
	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	/* Let the platform-specific driver adjust the encoding if needed */
	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}
280
Seungwon Jeon90c21432013-08-31 00:14:05 +0900281static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
282{
283 struct mmc_command *stop;
284 u32 cmdr;
285
286 if (!cmd->data)
287 return 0;
288
289 stop = &host->stop_abort;
290 cmdr = cmd->opcode;
291 memset(stop, 0, sizeof(struct mmc_command));
292
293 if (cmdr == MMC_READ_SINGLE_BLOCK ||
294 cmdr == MMC_READ_MULTIPLE_BLOCK ||
295 cmdr == MMC_WRITE_BLOCK ||
296 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
297 stop->opcode = MMC_STOP_TRANSMISSION;
298 stop->arg = 0;
299 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
300 } else if (cmdr == SD_IO_RW_EXTENDED) {
301 stop->opcode = SD_IO_RW_DIRECT;
302 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
303 ((cmd->arg >> 28) & 0x7);
304 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
305 } else {
306 return 0;
307 }
308
309 cmdr = stop->opcode | SDMMC_CMD_STOP |
310 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
311
312 return cmdr;
313}
314
/*
 * Latch @cmd as the in-flight command and kick it off in the controller:
 * write the argument, then the CMD register with the start bit set.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	/* ensure CMDARG reaches the controller before the start bit */
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
328
Seungwon Jeon90c21432013-08-31 00:14:05 +0900329static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
Will Newtonf95f3852011-01-02 01:11:59 -0500330{
Seungwon Jeon90c21432013-08-31 00:14:05 +0900331 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
332 dw_mci_start_command(host, stop, host->stop_cmdr);
Will Newtonf95f3852011-01-02 01:11:59 -0500333}
334
/* DMA interface functions */

/*
 * Abort any in-progress DMA for the current transfer and mark the data
 * phase as finished so the tasklet state machine can move on.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
346
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900347static int dw_mci_get_dma_dir(struct mmc_data *data)
348{
349 if (data->flags & MMC_DATA_WRITE)
350 return DMA_TO_DEVICE;
351 else
352 return DMA_FROM_DEVICE;
353}
354
Jaehoon Chung9beee912012-02-16 11:19:38 +0900355#ifdef CONFIG_MMC_DW_IDMAC
Will Newtonf95f3852011-01-02 01:11:59 -0500356static void dw_mci_dma_cleanup(struct dw_mci *host)
357{
358 struct mmc_data *data = host->data;
359
360 if (data)
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900361 if (!data->host_cookie)
Thomas Abraham4a909202012-09-17 18:16:35 +0000362 dma_unmap_sg(host->dev,
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900363 data->sg,
364 data->sg_len,
365 dw_mci_get_dma_dir(data));
Will Newtonf95f3852011-01-02 01:11:59 -0500366}
367
Seungwon Jeon5ce9d962013-08-31 00:14:33 +0900368static void dw_mci_idmac_reset(struct dw_mci *host)
369{
370 u32 bmod = mci_readl(host, BMOD);
371 /* Software reset of DMA */
372 bmod |= SDMMC_IDMAC_SWRESET;
373 mci_writel(host, BMOD, bmod);
374}
375
/*
 * Halt the internal DMAC: deselect it in CTRL (with a DMA reset), then
 * disable the engine in BMOD while asserting its software reset.
 */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
392
/*
 * IDMAC completion callback: unmap the transfer and, if the request is
 * still alive, flag the data phase complete and poke the state machine.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
410
/*
 * Fill the IDMAC descriptor ring from the (already DMA-mapped)
 * scatterlist: one descriptor per segment, all owned by the engine with
 * per-descriptor interrupts suppressed; then mark the first/last
 * descriptors and enable the completion interrupt on the last one.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/*
	 * Set last descriptor.  Note: sg_cpu is a void pointer, so this
	 * arithmetic is in bytes (GCC void* extension), landing on entry
	 * i - 1.  Clearing DIC re-enables the completion interrupt.
	 */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	/* descriptors must be in memory before the engine is started */
	wmb();
}
442
/*
 * Build the descriptor ring for the current transfer, route the
 * controller's data path through the IDMAC, enable the engine, and
 * issue a poll demand to start it fetching descriptors.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* descriptors and CTRL must be settled before enabling the engine */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
464
/*
 * One-time IDMAC setup: forward-link the descriptor ring that lives in
 * the page at host->sg_cpu/host->sg_dma, close the ring with an
 * end-of-ring descriptor, reset the engine, program the interrupt mask
 * (TX/RX complete only) and the descriptor base address.
 *
 * Returns 0 (kept non-void to match dw_mci_dma_ops.init).
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}
492
/* DMA callbacks used by the core when the internal DMAC is compiled in */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
500#endif /* CONFIG_MMC_DW_IDMAC */
501
/*
 * DMA-map the request's scatterlist, enforcing the controller's
 * constraints.
 *
 * Returns the number of mapped segments, or -EINVAL when the transfer
 * must fall back to PIO (too short, or block size / buffers not 32-bit
 * aligned).  With @next set (pre_req path) the segment count is cached
 * in data->host_cookie; with @next clear a cached mapping is reused.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	/* Reuse the mapping dw_mci_pre_req() already created */
	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
540
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900541static void dw_mci_pre_req(struct mmc_host *mmc,
542 struct mmc_request *mrq,
543 bool is_first_req)
544{
545 struct dw_mci_slot *slot = mmc_priv(mmc);
546 struct mmc_data *data = mrq->data;
547
548 if (!slot->host->use_dma || !data)
549 return;
550
551 if (data->host_cookie) {
552 data->host_cookie = 0;
553 return;
554 }
555
556 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
557 data->host_cookie = 0;
558}
559
/*
 * mmc_host_ops.post_req: undo the DMA mapping created by
 * dw_mci_pre_req() (non-zero host_cookie) and clear the cookie.
 */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}
577
/*
 * Program FIFOTH (DMA burst size MSIZE and RX/TX watermarks) for the
 * transfer's block size: pick the largest burst size that evenly
 * divides both the block depth and the TX watermark complement, falling
 * back to single-transfer bursts otherwise.
 *
 * Cleanup: use the kernel ARRAY_SIZE() idiom instead of the open-coded
 * sizeof division, and make the burst-size table static const so it is
 * not rebuilt on every call.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
618
/*
 * Program the card-read-threshold register (CDTHRCTL) for the transfer:
 * enabled only for HS200/SDR104 timings and only when a whole block
 * fits in the FIFO; disabled (threshold 0) in every other case.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	/* The whole block must fit in the FIFO for the threshold to help */
	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
649
/*
 * Try to hand @data to the DMA engine: map the scatterlist, tune
 * FIFOTH if the block size changed, enable the DMA interface, mask the
 * PIO RX/TX interrupts, and start the engine.
 *
 * Returns 0 on success; a negative errno means the caller must fall
 * back to PIO.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
696
/*
 * Set up the data phase of a request: record direction, program the
 * read threshold for reads, and submit via DMA when possible, falling
 * back to interrupt-driven PIO (sg_miter + RX/TX watermark IRQs).
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);	/* only one data transfer may be in flight */
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		/* DMA refused the transfer — drive it by PIO instead */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
751
/*
 * Issue a register-only command (e.g. a clock update) to the card
 * interface unit and busy-wait up to 500ms for the controller to accept
 * it — the start bit self-clears on acceptance.  Logs an error on
 * timeout; the caller is not informed (best effort).
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	/* argument must land before the start bit is set */
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
771
/*
 * Program the card clock and bus width for @slot.  The clock is only
 * reprogrammed when the requested rate changed (or @force_clkinit); the
 * sequence disable-clock / set-divider / enable-clock, each followed by
 * a CIU update command, follows the controller's required clock-change
 * protocol.  A zero clock simply gates the card clock off.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		/* CLKDIV divides by 2*div; 0 means bypass (bus_hz == clock) */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		/* only log when the effective rate actually changes */
		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* keep the clock with reflecting clock divider */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
835
/*
 * Start @cmd (the request's sbc or first command) on @slot: reset the
 * per-request state, program the data registers when a data phase is
 * present, issue the command, and precompute the stop/abort CMD value
 * used to terminate the transfer on completion or error.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	/* fresh event/status state for this request */
	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		/* maximum hardware timeout; byte count covers all blocks */
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		/* data setup must be visible before the command starts */
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
880
Seungwon Jeon053b3ce2011-12-22 18:01:29 +0900881static void dw_mci_start_request(struct dw_mci *host,
882 struct dw_mci_slot *slot)
883{
884 struct mmc_request *mrq = slot->mrq;
885 struct mmc_command *cmd;
886
887 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
888 __dw_mci_start_request(host, slot, cmd);
889}
890
/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	/* Start now if the controller is idle, else queue behind others */
	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}
907
908static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
909{
910 struct dw_mci_slot *slot = mmc_priv(mmc);
911 struct dw_mci *host = slot->host;
912
913 WARN_ON(slot->mrq);
914
James Hogan7456caa2011-06-24 13:55:10 +0100915 /*
916 * The check for card presence and queueing of the request must be
917 * atomic, otherwise the card could be removed in between and the
918 * request wouldn't fail until another card was inserted.
919 */
920 spin_lock_bh(&host->lock);
921
Will Newtonf95f3852011-01-02 01:11:59 -0500922 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
James Hogan7456caa2011-06-24 13:55:10 +0100923 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -0500924 mrq->cmd->error = -ENOMEDIUM;
925 mmc_request_done(mmc, mrq);
926 return;
927 }
928
Will Newtonf95f3852011-01-02 01:11:59 -0500929 dw_mci_queue_request(host, slot, mrq);
James Hogan7456caa2011-06-24 13:55:10 +0100930
931 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -0500932}
933
/*
 * mmc_host_ops .set_ios callback: apply the core's I/O settings (bus
 * width, timing, clock, power state) to this slot's hardware.
 *
 * The sequence matters: the UHS_REG DDR bit and the cached clock are
 * updated before the platform hook and dw_mci_setup_bus() reprogram the
 * bus, and PWREN is touched last for power-mode changes.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	/* Record the requested bus width for the next command's CTYPE. */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set: per-slot DDR enable bits live at UHS_REG[16+id]. */
	if (ios->timing == MMC_TIMING_MMC_DDR52)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	/* Give the platform-specific driver a chance to tweak settings. */
	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		/* Next command must carry the 80-clock init sequence. */
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		/* Power up slot: set this slot's bit in PWREN. */
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		/* Power down slot: clear this slot's bit in PWREN. */
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}
991
992static int dw_mci_get_ro(struct mmc_host *mmc)
993{
994 int read_only;
995 struct dw_mci_slot *slot = mmc_priv(mmc);
Jaehoon Chung9795a842014-03-03 11:36:46 +0900996 int gpio_ro = mmc_gpio_get_ro(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -0500997
998 /* Use platform get_ro function, else try on board write protect */
Doug Anderson96406392013-01-11 17:03:54 +0000999 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
Thomas Abrahamb4967aa2012-09-17 18:16:39 +00001000 read_only = 0;
Jaehoon Chung9795a842014-03-03 11:36:46 +09001001 else if (!IS_ERR_VALUE(gpio_ro))
1002 read_only = gpio_ro;
Will Newtonf95f3852011-01-02 01:11:59 -05001003 else
1004 read_only =
1005 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1006
1007 dev_dbg(&mmc->class_dev, "card is %s\n",
1008 read_only ? "read-only" : "read-write");
1009
1010 return read_only;
1011}
1012
1013static int dw_mci_get_cd(struct mmc_host *mmc)
1014{
1015 int present;
1016 struct dw_mci_slot *slot = mmc_priv(mmc);
1017 struct dw_mci_board *brd = slot->host->pdata;
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001018 struct dw_mci *host = slot->host;
1019 int gpio_cd = mmc_gpio_get_cd(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -05001020
1021 /* Use platform get_cd function, else try onboard card detect */
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09001022 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1023 present = 1;
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001024 else if (!IS_ERR_VALUE(gpio_cd))
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001025 present = gpio_cd;
Will Newtonf95f3852011-01-02 01:11:59 -05001026 else
1027 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1028 == 0 ? 1 : 0;
1029
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001030 spin_lock_bh(&host->lock);
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001031 if (present) {
1032 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
Will Newtonf95f3852011-01-02 01:11:59 -05001033 dev_dbg(&mmc->class_dev, "card is present\n");
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001034 } else {
1035 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
Will Newtonf95f3852011-01-02 01:11:59 -05001036 dev_dbg(&mmc->class_dev, "card is not present\n");
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001037 }
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001038 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -05001039
1040 return present;
1041}
1042
Doug Anderson9623b5b2012-07-25 08:33:17 -07001043/*
1044 * Disable lower power mode.
1045 *
1046 * Low power mode will stop the card clock when idle. According to the
1047 * description of the CLKENA register we should disable low power mode
1048 * for SDIO cards if we need SDIO interrupts to work.
1049 *
1050 * This function is fast if low power mode is already disabled.
1051 */
1052static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1053{
1054 struct dw_mci *host = slot->host;
1055 u32 clk_en_a;
1056 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1057
1058 clk_en_a = mci_readl(host, CLKENA);
1059
1060 if (clk_en_a & clken_low_pwr) {
1061 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1062 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1063 SDMMC_CMD_PRV_DAT_WAIT, 0);
1064 }
1065}
1066
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301067static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1068{
1069 struct dw_mci_slot *slot = mmc_priv(mmc);
1070 struct dw_mci *host = slot->host;
1071 u32 int_mask;
1072
1073 /* Enable/disable Slot Specific SDIO interrupt */
1074 int_mask = mci_readl(host, INTMASK);
1075 if (enb) {
Doug Anderson9623b5b2012-07-25 08:33:17 -07001076 /*
1077 * Turn off low power mode if it was enabled. This is a bit of
1078 * a heavy operation and we disable / enable IRQs a lot, so
1079 * we'll leave low power mode disabled and it will get
1080 * re-enabled again in dw_mci_setup_bus().
1081 */
1082 dw_mci_disable_low_power(slot);
1083
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301084 mci_writel(host, INTMASK,
Kyoungil Kim705ad042012-05-14 17:38:48 +09001085 (int_mask | SDMMC_INT_SDIO(slot->id)));
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301086 } else {
1087 mci_writel(host, INTMASK,
Kyoungil Kim705ad042012-05-14 17:38:48 +09001088 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301089 }
1090}
1091
Seungwon Jeon0976f162013-08-31 00:12:42 +09001092static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1093{
1094 struct dw_mci_slot *slot = mmc_priv(mmc);
1095 struct dw_mci *host = slot->host;
1096 const struct dw_mci_drv_data *drv_data = host->drv_data;
1097 struct dw_mci_tuning_data tuning_data;
1098 int err = -ENOSYS;
1099
1100 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1101 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1102 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1103 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1104 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1105 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1106 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1107 } else {
1108 return -EINVAL;
1109 }
1110 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1111 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1112 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1113 } else {
1114 dev_err(host->dev,
1115 "Undefined command(%d) for tuning\n", opcode);
1116 return -EINVAL;
1117 }
1118
1119 if (drv_data && drv_data->execute_tuning)
1120 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1121 return err;
1122}
1123
/* Host operations handed to the MMC core; shared by every slot. */
static const struct mmc_host_ops dw_mci_ops = {
	.request = dw_mci_request,
	.pre_req = dw_mci_pre_req,
	.post_req = dw_mci_post_req,
	.set_ios = dw_mci_set_ios,
	.get_ro = dw_mci_get_ro,
	.get_cd = dw_mci_get_cd,
	.enable_sdio_irq = dw_mci_enable_sdio_irq,
	.execute_tuning = dw_mci_execute_tuning,
};
1134
/*
 * Finish the current request and start the next queued one, if any.
 *
 * Must be called with host->lock held; the lock is dropped around
 * mmc_request_done() (hence the __releases/__acquires annotations)
 * because the completion callback may re-enter the driver.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Dequeue the next waiting slot and start its request. */
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	/* Notify the core outside the lock to avoid deadlock on re-entry. */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1163
/*
 * Harvest the response registers and translate the latched command
 * interrupt status (host->cmd_status) into cmd->error.
 *
 * Returns cmd->error (0 on success) so callers can branch on it.
 */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* Long response: RESP0 holds the least significant
			 * word, so fill resp[] in reverse. */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	/* Map hardware status bits to errno, most specific first. */
	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}
1202
/*
 * Translate the latched data interrupt status (host->data_status) into
 * data->error and set data->bytes_xfered accordingly.
 *
 * On any data error the controller is reset to flush stale FIFO
 * contents. Returns data->error (0 on success).
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			/* End-bit error: meaning depends on direction. */
			if (host->dir_status ==
			    DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
				   DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
1245
/*
 * Bottom-half state machine, scheduled from the interrupt handler.
 *
 * Drives a request through the states IDLE -> SENDING_CMD ->
 * SENDING_DATA -> DATA_BUSY -> SENDING_STOP, consuming the event bits
 * the IRQ handler set in host->pending_events. The loop re-runs as
 * long as a state transition happened (state != prev_state) so several
 * already-pending events can be processed in one pass. Runs entirely
 * under host->lock.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 done: now issue the real data cmd. */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/* Data command failed: abort the transfer. */
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					/* Pre-defined transfer (CMD23): no
					 * stop command needed. */
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
1382
James Hogan34b664a2011-06-24 13:57:56 +01001383/* push final bytes to part_buf, only use during push */
1384static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1385{
1386 memcpy((void *)&host->part_buf, buf, cnt);
1387 host->part_buf_count = cnt;
1388}
1389
1390/* append bytes to part_buf, only use during push */
1391static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1392{
1393 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1394 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1395 host->part_buf_count += cnt;
1396 return cnt;
1397}
1398
1399/* pull first bytes from part_buf, only use during pull */
1400static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1401{
1402 cnt = min(cnt, (int)host->part_buf_count);
1403 if (cnt) {
1404 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1405 cnt);
1406 host->part_buf_count -= cnt;
1407 host->part_buf_start += cnt;
1408 }
1409 return cnt;
1410}
1411
1412/* pull final bytes from the part_buf, assuming it's just been filled */
1413static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1414{
1415 memcpy(buf, &host->part_buf, cnt);
1416 host->part_buf_start = cnt;
1417 host->part_buf_count = (1 << host->data_shift) - cnt;
1418}
1419
/*
 * Push cnt bytes from buf into a 16-bit-wide data FIFO.
 *
 * Leftover bytes from a previous call are flushed first via part_buf;
 * any trailing byte that does not fill a whole FIFO word is stashed in
 * part_buf, and is force-pushed when the transfer's total length has
 * been reached so the final odd byte is not lost.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through an aligned buffer if buf is misaligned. */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}
1470
/*
 * Pull cnt bytes from a 16-bit-wide data FIFO into buf.
 *
 * A final partial word is read into part_buf16 so the extra byte can
 * be handed out on the next pull.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through an aligned buffer if buf is misaligned. */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Stash the partial word for the next pull. */
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1502
/*
 * Push cnt bytes from buf into a 32-bit-wide data FIFO.
 *
 * Same scheme as dw_mci_push_data16, but with a 4-byte FIFO word:
 * flush part_buf first, stream whole words, stash the remainder, and
 * force-push the remainder when the transfer's total length is hit.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through an aligned buffer if buf is misaligned. */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}
1553
/*
 * Pull cnt bytes from a 32-bit-wide data FIFO into buf.
 *
 * A final partial word is read into part_buf32 so the extra bytes can
 * be handed out on the next pull.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through an aligned buffer if buf is misaligned. */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Stash the partial word for the next pull. */
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1585
/*
 * Push cnt bytes from buf into a 64-bit-wide data FIFO.
 *
 * Same scheme as dw_mci_push_data16/32, but with an 8-byte FIFO word;
 * the partial word lives directly in host->part_buf.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through an aligned buffer if buf is misaligned. */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}
1637
/*
 * Pull cnt bytes from a 64-bit-wide data FIFO into buf.
 *
 * A final partial word is read into host->part_buf so the extra bytes
 * can be handed out on the next pull.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Bounce through an aligned buffer if buf is misaligned. */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Stash the partial word for the next pull. */
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1669
1670static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1671{
1672 int len;
1673
1674 /* get remaining partial bytes */
1675 len = dw_mci_pull_part_bytes(host, buf, cnt);
1676 if (unlikely(len == cnt))
1677 return;
1678 buf += len;
1679 cnt -= len;
1680
1681 /* get the rest of the data */
1682 host->pull_data(host, buf, cnt);
Will Newtonf95f3852011-01-02 01:11:59 -05001683}
1684
/*
 * PIO receive path: drain the controller FIFO into the request's
 * scatterlist via the sg mapping iterator.
 *
 * @dto: true when called for a data-transfer-over interrupt, in which
 *       case keep draining until the FIFO count reads zero.
 *
 * When the scatterlist is exhausted, EVENT_XFER_COMPLETE is signalled
 * to the tasklet.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* Bytes available = FIFO words scaled by the FIFO
			 * width, plus any stashed partial-word bytes. */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		/* Current segment fully consumed: peek at the next one. */
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* Publish the stores above before signalling the tasklet. */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1738
/*
 * PIO transmit path: refill the controller FIFO from the request's
 * scatterlist via the host's sg_mapping_iter.  Mirror image of
 * dw_mci_read_data_pio(), keyed off the TXDR (TX data request) interrupt.
 *
 * On scatterlist exhaustion (done:) the transfer is marked complete via
 * EVENT_XFER_COMPLETE for the tasklet to pick up.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;	/* log2 of FIFO word size in bytes */
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/*
			 * Free space in the FIFO (depth minus fill count,
			 * scaled to bytes), less any partial word already
			 * staged for a later push.
			 */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	/* Current sg entry fully consumed: advance so the next IRQ resumes
	 * at a fresh mapping; if there is no next entry we are done. */
	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* Publish host->sg = NULL before the completion bit is visible. */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1792
/*
 * Latch the interrupt status for the command phase and hand completion
 * off to the tasklet.  Only the first status is kept, so an earlier
 * error recorded by the IRQ handler is not overwritten by CMD_DONE.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	/* Make cmd_status visible before EVENT_CMD_COMPLETE is observed. */
	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1803
/*
 * Top-level interrupt handler.  Reads the masked interrupt status,
 * acknowledges each asserted source in RINTSTS, records status for the
 * tasklet/state machine, and services PIO data movement inline.  DMA
 * (IDMAC) interrupts are handled separately at the end.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		/* STATUS[29:17] is the FIFO count; a non-empty FIFO with no
		 * pending bits means the DTO interrupt was lost -- fake it. */
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			/* order status store before the event bit */
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			/* don't clobber a status already set by the error path */
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* final drain of the RX FIFO (dto = true) */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			/* card detect is debounced/handled in process context */
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
1897
/*
 * Workqueue handler for card-detect events.  For each slot, compares the
 * current card presence against the last recorded state; on a change it
 * aborts any in-flight or queued request with -ENOMEDIUM, resets the
 * controller on removal, and finally notifies the mmc core.  The while
 * loop re-samples presence so back-to-back insert/remove bounces are not
 * missed.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/* request is actively on the hardware:
					 * fail it according to how far the
					 * state machine has progressed */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* request was still queued: fail it and
					 * complete outside the host lock */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0)
				dw_mci_reset(host);

			spin_unlock_bh(&host->lock);

			/* re-sample in case the card bounced meanwhile */
			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
1977
Thomas Abrahamc91eab42012-09-17 18:16:40 +00001978#ifdef CONFIG_OF
1979/* given a slot id, find out the device node representing that slot */
1980static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1981{
1982 struct device_node *np;
1983 const __be32 *addr;
1984 int len;
1985
1986 if (!dev || !dev->of_node)
1987 return NULL;
1988
1989 for_each_child_of_node(dev->of_node, np) {
1990 addr = of_get_property(np, "reg", &len);
1991 if (!addr || (len < sizeof(int)))
1992 continue;
1993 if (be32_to_cpup(addr) == slot)
1994 return np;
1995 }
1996 return NULL;
1997}
1998
/* Per-slot quirk table: maps a device-tree property name to the driver
 * quirk bit that dw_mci_of_get_slot_quirks() sets when the property is
 * present on the slot's node. */
static struct dw_mci_of_slot_quirks {
	char *quirk;	/* DT property to test for */
	int id;		/* corresponding DW_MCI_SLOT_QUIRK_* flag */
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};
2008
2009static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2010{
2011 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2012 int quirks = 0;
2013 int idx;
2014
2015 /* get quirks */
2016 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2017 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2018 quirks |= of_slot_quirks[idx].id;
2019
2020 return quirks;
2021}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002022#else /* CONFIG_OF */
/* !CONFIG_OF stub: no device tree, so no per-slot quirks. */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
/* !CONFIG_OF stub: no device tree, so no slot node to find. */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
2031#endif /* CONFIG_OF */
2032
Jaehoon Chung36c179a2012-08-23 20:31:48 +09002033static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
Will Newtonf95f3852011-01-02 01:11:59 -05002034{
2035 struct mmc_host *mmc;
2036 struct dw_mci_slot *slot;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002037 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002038 int ctrl_id, ret;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002039 u32 freq[2];
Will Newtonf95f3852011-01-02 01:11:59 -05002040
Thomas Abraham4a909202012-09-17 18:16:35 +00002041 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
Will Newtonf95f3852011-01-02 01:11:59 -05002042 if (!mmc)
2043 return -ENOMEM;
2044
2045 slot = mmc_priv(mmc);
2046 slot->id = id;
2047 slot->mmc = mmc;
2048 slot->host = host;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002049 host->slot[id] = slot;
Will Newtonf95f3852011-01-02 01:11:59 -05002050
Doug Andersona70aaa62013-01-11 17:03:50 +00002051 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2052
Will Newtonf95f3852011-01-02 01:11:59 -05002053 mmc->ops = &dw_mci_ops;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002054 if (of_property_read_u32_array(host->dev->of_node,
2055 "clock-freq-min-max", freq, 2)) {
2056 mmc->f_min = DW_MCI_FREQ_MIN;
2057 mmc->f_max = DW_MCI_FREQ_MAX;
2058 } else {
2059 mmc->f_min = freq[0];
2060 mmc->f_max = freq[1];
2061 }
Will Newtonf95f3852011-01-02 01:11:59 -05002062
Jaehoon Chung907abd52014-03-03 11:36:43 +09002063 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
Will Newtonf95f3852011-01-02 01:11:59 -05002064
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002065 if (host->pdata->caps)
2066 mmc->caps = host->pdata->caps;
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002067
Abhilash Kesavanab269122012-11-19 10:26:21 +05302068 if (host->pdata->pm_caps)
2069 mmc->pm_caps = host->pdata->pm_caps;
2070
Thomas Abraham800d78b2012-09-17 18:16:42 +00002071 if (host->dev->of_node) {
2072 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2073 if (ctrl_id < 0)
2074 ctrl_id = 0;
2075 } else {
2076 ctrl_id = to_platform_device(host->dev)->id;
2077 }
James Hogancb27a842012-10-16 09:43:08 +01002078 if (drv_data && drv_data->caps)
2079 mmc->caps |= drv_data->caps[ctrl_id];
Thomas Abraham800d78b2012-09-17 18:16:42 +00002080
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002081 if (host->pdata->caps2)
2082 mmc->caps2 = host->pdata->caps2;
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002083
Jaehoon Chungd8a4fb02014-03-03 11:36:41 +09002084 mmc_of_parse(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -05002085
Will Newtonf95f3852011-01-02 01:11:59 -05002086 if (host->pdata->blk_settings) {
2087 mmc->max_segs = host->pdata->blk_settings->max_segs;
2088 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2089 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2090 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2091 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2092 } else {
2093 /* Useful defaults if platform data is unset. */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002094#ifdef CONFIG_MMC_DW_IDMAC
2095 mmc->max_segs = host->ring_size;
2096 mmc->max_blk_size = 65536;
2097 mmc->max_blk_count = host->ring_size;
2098 mmc->max_seg_size = 0x1000;
2099 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2100#else
Will Newtonf95f3852011-01-02 01:11:59 -05002101 mmc->max_segs = 64;
2102 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2103 mmc->max_blk_count = 512;
2104 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2105 mmc->max_seg_size = mmc->max_req_size;
Will Newtonf95f3852011-01-02 01:11:59 -05002106#endif /* CONFIG_MMC_DW_IDMAC */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002107 }
Will Newtonf95f3852011-01-02 01:11:59 -05002108
Jaehoon Chungae0eb342014-03-03 11:36:48 +09002109 if (dw_mci_get_cd(mmc))
2110 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2111 else
2112 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2113
Jaehoon Chung0cea5292013-02-15 23:45:45 +09002114 ret = mmc_add_host(mmc);
2115 if (ret)
2116 goto err_setup_bus;
Will Newtonf95f3852011-01-02 01:11:59 -05002117
2118#if defined(CONFIG_DEBUG_FS)
2119 dw_mci_init_debugfs(slot);
2120#endif
2121
2122 /* Card initially undetected */
2123 slot->last_detect_state = 0;
2124
Will Newtonf95f3852011-01-02 01:11:59 -05002125 return 0;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002126
2127err_setup_bus:
2128 mmc_free_host(mmc);
2129 return -EINVAL;
Will Newtonf95f3852011-01-02 01:11:59 -05002130}
2131
/*
 * Tear down a slot created by dw_mci_init_slot(): unregister from the
 * mmc core, drop the host's back-pointer, then free the mmc_host (which
 * owns the slot structure via mmc_priv()).
 */
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
2139
/*
 * Set up DMA for the host if possible: allocate the descriptor page and
 * hook up/initialize the (internal IDMAC) DMA ops.  Any failure falls
 * back to PIO by clearing host->use_dma -- this function never fails
 * hard.
 */
static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					  &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	/* dma_ops may also come from platform data (set by the caller) */
	if (!host->dma_ops)
		goto no_dma;

	/* All four callbacks are required for a usable DMA implementation. */
	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize "
				"DMA Controller.\n", __func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}
2180
Seungwon Jeon31bff452013-08-31 00:14:23 +09002181static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
Will Newtonf95f3852011-01-02 01:11:59 -05002182{
2183 unsigned long timeout = jiffies + msecs_to_jiffies(500);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002184 u32 ctrl;
Will Newtonf95f3852011-01-02 01:11:59 -05002185
Seungwon Jeon31bff452013-08-31 00:14:23 +09002186 ctrl = mci_readl(host, CTRL);
2187 ctrl |= reset;
2188 mci_writel(host, CTRL, ctrl);
Will Newtonf95f3852011-01-02 01:11:59 -05002189
2190 /* wait till resets clear */
2191 do {
2192 ctrl = mci_readl(host, CTRL);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002193 if (!(ctrl & reset))
Will Newtonf95f3852011-01-02 01:11:59 -05002194 return true;
2195 } while (time_before(jiffies, timeout));
2196
Seungwon Jeon31bff452013-08-31 00:14:23 +09002197 dev_err(host->dev,
2198 "Timeout resetting block (ctrl reset %#x)\n",
2199 ctrl & reset);
Will Newtonf95f3852011-01-02 01:11:59 -05002200
2201 return false;
2202}
2203
/*
 * Full controller recovery: reset the CIU, FIFO and (when in use) DMA,
 * wait for any outstanding DMA request to drain, reprogram the IDMAC,
 * and finally re-issue a clock-update command so the CIU clock registers
 * are valid again.  Returns true if every step succeeded.
 */
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;
			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to "
					"clear during reset\n", __func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev, "%s: fifo/dma reset bits didn't "
				"clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
	/* It is also recommended that we reset and reprogram idmac */
	dw_mci_idmac_reset(host);
#endif

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
2273
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002274#ifdef CONFIG_OF
/* Host-level quirk table: maps a device-tree property name to the driver
 * quirk bit dw_mci_parse_dt() sets on pdata->quirks when the property is
 * present. */
static struct dw_mci_of_quirks {
	char *quirk;	/* DT property to test for */
	int id;		/* corresponding DW_MCI_QUIRK_* flag */
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};
2284
/*
 * Build a dw_mci_board from the host's device-tree node.
 *
 * Fills in slot count, quirk flags, FIFO depth, card-detect delay, bus
 * clock and capability bits, and gives the controller-specific drv_data
 * hook a chance to parse its own properties.
 *
 * Returns the devm-allocated pdata on success, or an ERR_PTR on
 * allocation failure / drv_data parse error.
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, "
			 "assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	/* fifo_depth stays 0 when absent; probe falls back to FIFOTH */
	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using "
			 "value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* controller-specific (e.g. exynos) DT properties */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	return pdata;
}
2333
2334#else /* CONFIG_OF */
/* !CONFIG_OF stub: platform data must come from board code instead. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
2339#endif /* CONFIG_OF */
2340
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302341int dw_mci_probe(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002342{
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002343 const struct dw_mci_drv_data *drv_data = host->drv_data;
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302344 int width, i, ret = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002345 u32 fifo_size;
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002346 int init_slots = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002347
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002348 if (!host->pdata) {
2349 host->pdata = dw_mci_parse_dt(host);
2350 if (IS_ERR(host->pdata)) {
2351 dev_err(host->dev, "platform data not available\n");
2352 return -EINVAL;
2353 }
Will Newtonf95f3852011-01-02 01:11:59 -05002354 }
2355
Jaehoon Chung907abd52014-03-03 11:36:43 +09002356 if (host->pdata->num_slots > 1) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002357 dev_err(host->dev,
Jaehoon Chung907abd52014-03-03 11:36:43 +09002358 "Platform data must supply num_slots.\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302359 return -ENODEV;
Will Newtonf95f3852011-01-02 01:11:59 -05002360 }
2361
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002362 host->biu_clk = devm_clk_get(host->dev, "biu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002363 if (IS_ERR(host->biu_clk)) {
2364 dev_dbg(host->dev, "biu clock not available\n");
2365 } else {
2366 ret = clk_prepare_enable(host->biu_clk);
2367 if (ret) {
2368 dev_err(host->dev, "failed to enable biu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002369 return ret;
2370 }
Will Newtonf95f3852011-01-02 01:11:59 -05002371 }
2372
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002373 host->ciu_clk = devm_clk_get(host->dev, "ciu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002374 if (IS_ERR(host->ciu_clk)) {
2375 dev_dbg(host->dev, "ciu clock not available\n");
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002376 host->bus_hz = host->pdata->bus_hz;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002377 } else {
2378 ret = clk_prepare_enable(host->ciu_clk);
2379 if (ret) {
2380 dev_err(host->dev, "failed to enable ciu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002381 goto err_clk_biu;
2382 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002383
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002384 if (host->pdata->bus_hz) {
2385 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2386 if (ret)
2387 dev_warn(host->dev,
Jaehoon Chung612de4c2014-03-03 11:36:42 +09002388 "Unable to set bus rate to %uHz\n",
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002389 host->pdata->bus_hz);
2390 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002391 host->bus_hz = clk_get_rate(host->ciu_clk);
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002392 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002393
Jaehoon Chung612de4c2014-03-03 11:36:42 +09002394 if (!host->bus_hz) {
2395 dev_err(host->dev,
2396 "Platform data must supply bus speed\n");
2397 ret = -ENODEV;
2398 goto err_clk_ciu;
2399 }
2400
Yuvaraj Kumar C D002f0d52013-08-31 00:12:19 +09002401 if (drv_data && drv_data->init) {
2402 ret = drv_data->init(host);
2403 if (ret) {
2404 dev_err(host->dev,
2405 "implementation specific init failed\n");
2406 goto err_clk_ciu;
2407 }
2408 }
2409
James Hogancb27a842012-10-16 09:43:08 +01002410 if (drv_data && drv_data->setup_clock) {
2411 ret = drv_data->setup_clock(host);
Thomas Abraham800d78b2012-09-17 18:16:42 +00002412 if (ret) {
2413 dev_err(host->dev,
2414 "implementation specific clock setup failed\n");
2415 goto err_clk_ciu;
2416 }
2417 }
2418
Mark Browna55d6ff2013-07-29 21:55:27 +01002419 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
Doug Anderson870556a2013-06-07 10:28:29 -07002420 if (IS_ERR(host->vmmc)) {
2421 ret = PTR_ERR(host->vmmc);
2422 if (ret == -EPROBE_DEFER)
2423 goto err_clk_ciu;
2424
2425 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2426 host->vmmc = NULL;
2427 } else {
2428 ret = regulator_enable(host->vmmc);
2429 if (ret) {
2430 if (ret != -EPROBE_DEFER)
2431 dev_err(host->dev,
2432 "regulator_enable fail: %d\n", ret);
2433 goto err_clk_ciu;
2434 }
2435 }
2436
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302437 host->quirks = host->pdata->quirks;
Will Newtonf95f3852011-01-02 01:11:59 -05002438
2439 spin_lock_init(&host->lock);
2440 INIT_LIST_HEAD(&host->queue);
2441
Will Newtonf95f3852011-01-02 01:11:59 -05002442 /*
2443 * Get the host data width - this assumes that HCON has been set with
2444 * the correct values.
2445 */
2446 i = (mci_readl(host, HCON) >> 7) & 0x7;
2447 if (!i) {
2448 host->push_data = dw_mci_push_data16;
2449 host->pull_data = dw_mci_pull_data16;
2450 width = 16;
2451 host->data_shift = 1;
2452 } else if (i == 2) {
2453 host->push_data = dw_mci_push_data64;
2454 host->pull_data = dw_mci_pull_data64;
2455 width = 64;
2456 host->data_shift = 3;
2457 } else {
2458 /* Check for a reserved value, and warn if it is */
2459 WARN((i != 1),
2460 "HCON reports a reserved host data width!\n"
2461 "Defaulting to 32-bit access.\n");
2462 host->push_data = dw_mci_push_data32;
2463 host->pull_data = dw_mci_pull_data32;
2464 width = 32;
2465 host->data_shift = 2;
2466 }
2467
2468 /* Reset all blocks */
Sonny Rao3a33a942014-08-04 18:19:50 -07002469 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
Seungwon Jeon141a7122012-05-22 13:01:03 +09002470 return -ENODEV;
2471
2472 host->dma_ops = host->pdata->dma_ops;
2473 dw_mci_init_dma(host);
Will Newtonf95f3852011-01-02 01:11:59 -05002474
2475 /* Clear the interrupts for the host controller */
2476 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2477 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2478
2479 /* Put in max timeout */
2480 mci_writel(host, TMOUT, 0xFFFFFFFF);
2481
2482 /*
2483 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2484 * Tx Mark = fifo_size / 2 DMA Size = 8
2485 */
James Hoganb86d8252011-06-24 13:57:18 +01002486 if (!host->pdata->fifo_depth) {
2487 /*
2488 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2489 * have been overwritten by the bootloader, just like we're
2490 * about to do, so if you know the value for your hardware, you
2491 * should put it in the platform data.
2492 */
2493 fifo_size = mci_readl(host, FIFOTH);
Jaehoon Chung8234e862012-01-11 09:28:21 +00002494 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
James Hoganb86d8252011-06-24 13:57:18 +01002495 } else {
2496 fifo_size = host->pdata->fifo_depth;
2497 }
2498 host->fifo_depth = fifo_size;
Seungwon Jeon52426892013-08-31 00:13:42 +09002499 host->fifoth_val =
2500 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002501 mci_writel(host, FIFOTH, host->fifoth_val);
Will Newtonf95f3852011-01-02 01:11:59 -05002502
2503 /* disable clock to CIU */
2504 mci_writel(host, CLKENA, 0);
2505 mci_writel(host, CLKSRC, 0);
2506
James Hogan63008762013-03-12 10:43:54 +00002507 /*
2508 * In 2.40a spec, Data offset is changed.
2509 * Need to check the version-id and set data-offset for DATA register.
2510 */
2511 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2512 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2513
2514 if (host->verid < DW_MMC_240A)
2515 host->data_offset = DATA_OFFSET;
2516 else
2517 host->data_offset = DATA_240A_OFFSET;
2518
Will Newtonf95f3852011-01-02 01:11:59 -05002519 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002520 host->card_workqueue = alloc_workqueue("dw-mci-card",
ZhangZhen59ff3eb2014-03-27 09:41:47 +08002521 WQ_MEM_RECLAIM, 1);
Wei Yongjunef7aef92013-04-19 09:25:45 +08002522 if (!host->card_workqueue) {
2523 ret = -ENOMEM;
James Hogan1791b13e2011-06-24 13:55:55 +01002524 goto err_dmaunmap;
Wei Yongjunef7aef92013-04-19 09:25:45 +08002525 }
James Hogan1791b13e2011-06-24 13:55:55 +01002526 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002527 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2528 host->irq_flags, "dw-mci", host);
Will Newtonf95f3852011-01-02 01:11:59 -05002529 if (ret)
James Hogan1791b13e2011-06-24 13:55:55 +01002530 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002531
Will Newtonf95f3852011-01-02 01:11:59 -05002532 if (host->pdata->num_slots)
2533 host->num_slots = host->pdata->num_slots;
2534 else
2535 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2536
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05302537 /*
2538 * Enable interrupts for command done, data over, data empty, card det,
2539 * receive ready and error such as transmit, receive timeout, crc error
2540 */
2541 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2542 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2543 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2544 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2545 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2546
2547 dev_info(host->dev, "DW MMC controller at irq %d, "
2548 "%d bit host data width, "
2549 "%u deep fifo\n",
2550 host->irq, width, fifo_size);
2551
Will Newtonf95f3852011-01-02 01:11:59 -05002552 /* We need at least one slot to succeed */
2553 for (i = 0; i < host->num_slots; i++) {
2554 ret = dw_mci_init_slot(host, i);
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002555 if (ret)
2556 dev_dbg(host->dev, "slot %d init failed\n", i);
2557 else
2558 init_slots++;
2559 }
2560
2561 if (init_slots) {
2562 dev_info(host->dev, "%d slots initialized\n", init_slots);
2563 } else {
2564 dev_dbg(host->dev, "attempted to initialize %d slots, "
2565 "but failed on all\n", host->num_slots);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002566 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002567 }
2568
Will Newtonf95f3852011-01-02 01:11:59 -05002569 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
Thomas Abraham4a909202012-09-17 18:16:35 +00002570 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002571
2572 return 0;
2573
James Hogan1791b13e2011-06-24 13:55:55 +01002574err_workqueue:
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002575 destroy_workqueue(host->card_workqueue);
James Hogan1791b13e2011-06-24 13:55:55 +01002576
Will Newtonf95f3852011-01-02 01:11:59 -05002577err_dmaunmap:
2578 if (host->use_dma && host->dma_ops->exit)
2579 host->dma_ops->exit(host);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002580 if (host->vmmc)
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002581 regulator_disable(host->vmmc);
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002582
2583err_clk_ciu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002584 if (!IS_ERR(host->ciu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002585 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002586
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002587err_clk_biu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002588 if (!IS_ERR(host->biu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002589 clk_disable_unprepare(host->biu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002590
Will Newtonf95f3852011-01-02 01:11:59 -05002591 return ret;
2592}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302593EXPORT_SYMBOL(dw_mci_probe);
Will Newtonf95f3852011-01-02 01:11:59 -05002594
/*
 * dw_mci_remove - tear down a DesignWare MMC host set up by dw_mci_probe()
 * @host: controller state allocated by the bus-glue / probe path
 *
 * Teardown mirrors probe in reverse.  The statement order matters:
 * interrupts are silenced first so no ISR can fire while the slots are
 * being unregistered, then the card clock is gated, and only afterwards
 * are the workqueue, DMA backend, regulator and bus/card clocks released.
 */
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	/* Ack any pending interrupt status, then mask every source. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Unregister every slot that probe managed to initialize. */
	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/* Flush and free the card-detect workqueue created in probe. */
	destroy_workqueue(host->card_workqueue);

	/* Let the DMA backend release its resources, if it has an exit hook. */
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	/* Balance the vmmc regulator enable done earlier, if one exists. */
	if (host->vmmc)
		regulator_disable(host->vmmc);

	/* Clocks are optional; IS_ERR() means they were never obtained. */
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
2627
2628
Will Newtonf95f3852011-01-02 01:11:59 -05002629
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002630#ifdef CONFIG_PM_SLEEP
Will Newtonf95f3852011-01-02 01:11:59 -05002631/*
2632 * TODO: we should probably disable the clock to the card in the suspend path.
2633 */
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302634int dw_mci_suspend(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002635{
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002636 if (host->vmmc)
2637 regulator_disable(host->vmmc);
2638
Will Newtonf95f3852011-01-02 01:11:59 -05002639 return 0;
2640}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302641EXPORT_SYMBOL(dw_mci_suspend);
Will Newtonf95f3852011-01-02 01:11:59 -05002642
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302643int dw_mci_resume(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002644{
2645 int i, ret;
Will Newtonf95f3852011-01-02 01:11:59 -05002646
Sachin Kamatf2f942c2013-04-04 11:25:10 +05302647 if (host->vmmc) {
2648 ret = regulator_enable(host->vmmc);
2649 if (ret) {
2650 dev_err(host->dev,
2651 "failed to enable regulator: %d\n", ret);
2652 return ret;
2653 }
2654 }
Jaehoon Chung1d6c4e02011-05-11 15:52:39 +09002655
Sonny Rao3a33a942014-08-04 18:19:50 -07002656 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002657 ret = -ENODEV;
2658 return ret;
2659 }
2660
Jonathan Kliegman3bfe6192012-06-14 13:31:55 -04002661 if (host->use_dma && host->dma_ops->init)
Seungwon Jeon141a7122012-05-22 13:01:03 +09002662 host->dma_ops->init(host);
2663
Seungwon Jeon52426892013-08-31 00:13:42 +09002664 /*
2665 * Restore the initial value at FIFOTH register
2666 * And Invalidate the prev_blksz with zero
2667 */
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002668 mci_writel(host, FIFOTH, host->fifoth_val);
Seungwon Jeon52426892013-08-31 00:13:42 +09002669 host->prev_blksz = 0;
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002670
Doug Anderson2eb29442013-08-31 00:11:49 +09002671 /* Put in max timeout */
2672 mci_writel(host, TMOUT, 0xFFFFFFFF);
2673
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002674 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2675 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2676 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2677 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2678 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2679
Will Newtonf95f3852011-01-02 01:11:59 -05002680 for (i = 0; i < host->num_slots; i++) {
2681 struct dw_mci_slot *slot = host->slot[i];
2682 if (!slot)
2683 continue;
Abhilash Kesavanab269122012-11-19 10:26:21 +05302684 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2685 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2686 dw_mci_setup_bus(slot, true);
2687 }
Will Newtonf95f3852011-01-02 01:11:59 -05002688 }
Will Newtonf95f3852011-01-02 01:11:59 -05002689 return 0;
2690}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302691EXPORT_SYMBOL(dw_mci_resume);
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002692#endif /* CONFIG_PM_SLEEP */
2693
/*
 * Module load hook.  The core driver registers no devices itself; the
 * exported dw_mci_probe()/dw_mci_remove() are called by separate bus-glue
 * drivers, so all we do here is announce ourselves.
 */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}
2699
/*
 * Module unload hook: intentionally empty.  Per-controller cleanup is
 * done in dw_mci_remove(), which the bus-glue drivers call directly.
 */
static void __exit dw_mci_exit(void)
{
}
2703
/* Register the (near no-op) module entry/exit points. */
module_init(dw_mci_init);
module_exit(dw_mci_exit);

/* Module metadata. */
MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");