/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: Hz */
#define DW_MCI_FREQ_MIN	400000		/* unit: Hz */
57
#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
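/*
 * des1 packs two 13-bit buffer sizes: bits [12:0] hold the buffer 1 size and
 * bits [25:13] the buffer 2 size. The macro above updates only the buffer 1
 * field while preserving the buffer 2 field (mask 0x03ffe000).
 */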

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */
83
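/*
 * Standard tuning block patterns: the card returns these fixed sequences in
 * response to CMD19 (SD) or CMD21 (eMMC HS200) so the host can check its
 * sample point in dw_mci_execute_tuning(); the 4-bit pattern is used for
 * 4-bit buses (and SD tuning) and the 8-bit pattern for 8-bit buses.
 */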
Seungwon Jeon0976f162013-08-31 00:12:42 +090084static const u8 tuning_blk_pattern_4bit[] = {
85 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
86 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
87 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
88 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
89 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
90 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
91 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
92 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
93};
Will Newtonf95f3852011-01-02 01:11:59 -050094
Seungwon Jeon0976f162013-08-31 00:12:42 +090095static const u8 tuning_blk_pattern_8bit[] = {
96 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
97 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
98 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
99 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
100 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
101 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
102 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
103 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
104 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
105 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
106 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
107 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
108 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
109 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
110 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
111 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
Will Newtonf95f3852011-01-02 01:11:59 -0500112};
113
Seungwon Jeon31bff452013-08-31 00:14:23 +0900114static inline bool dw_mci_fifo_reset(struct dw_mci *host);
115static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
116
Will Newtonf95f3852011-01-02 01:11:59 -0500117#if defined(CONFIG_DEBUG_FS)
118static int dw_mci_req_show(struct seq_file *s, void *v)
119{
120 struct dw_mci_slot *slot = s->private;
121 struct mmc_request *mrq;
122 struct mmc_command *cmd;
123 struct mmc_command *stop;
124 struct mmc_data *data;
125
126 /* Make sure we get a consistent snapshot */
127 spin_lock_bh(&slot->host->lock);
128 mrq = slot->mrq;
129
130 if (mrq) {
131 cmd = mrq->cmd;
132 data = mrq->data;
133 stop = mrq->stop;
134
		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
141 if (data)
142 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143 data->bytes_xfered, data->blocks,
144 data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
151 }
152
153 spin_unlock_bh(&slot->host->lock);
154
155 return 0;
156}
157
158static int dw_mci_req_open(struct inode *inode, struct file *file)
159{
160 return single_open(file, dw_mci_req_show, inode->i_private);
161}
162
163static const struct file_operations dw_mci_req_fops = {
164 .owner = THIS_MODULE,
165 .open = dw_mci_req_open,
166 .read = seq_read,
167 .llseek = seq_lseek,
168 .release = single_release,
169};
170
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}
182
183static int dw_mci_regs_open(struct inode *inode, struct file *file)
184{
185 return single_open(file, dw_mci_regs_show, inode->i_private);
186}
187
188static const struct file_operations dw_mci_regs_fops = {
189 .owner = THIS_MODULE,
190 .open = dw_mci_regs_open,
191 .read = seq_read,
192 .llseek = seq_lseek,
193 .release = single_release,
194};
195
196static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
197{
198 struct mmc_host *mmc = slot->mmc;
199 struct dw_mci *host = slot->host;
200 struct dentry *root;
201 struct dentry *node;
202
203 root = mmc->debugfs_root;
204 if (!root)
205 return;
206
207 node = debugfs_create_file("regs", S_IRUSR, root, host,
208 &dw_mci_regs_fops);
209 if (!node)
210 goto err;
211
212 node = debugfs_create_file("req", S_IRUSR, root, slot,
213 &dw_mci_req_fops);
214 if (!node)
215 goto err;
216
217 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
218 if (!node)
219 goto err;
220
221 node = debugfs_create_x32("pending_events", S_IRUSR, root,
222 (u32 *)&host->pending_events);
223 if (!node)
224 goto err;
225
226 node = debugfs_create_x32("completed_events", S_IRUSR, root,
227 (u32 *)&host->completed_events);
228 if (!node)
229 goto err;
230
231 return;
232
233err:
234 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
235}
236#endif /* defined(CONFIG_DEBUG_FS) */
237
238static void dw_mci_set_timeout(struct dw_mci *host)
239{
240 /* timeout (maximum) */
241 mci_writel(host, TMOUT, 0xffffffff);
242}
243
244static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
245{
246 struct mmc_data *data;
Thomas Abraham800d78b2012-09-17 18:16:42 +0000247 struct dw_mci_slot *slot = mmc_priv(mmc);
Arnd Bergmanne95baf12012-11-08 14:26:11 +0000248 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
Will Newtonf95f3852011-01-02 01:11:59 -0500249 u32 cmdr;
250 cmd->error = -EINPROGRESS;
251
252 cmdr = cmd->opcode;
253
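	/*
	 * Abort-class commands (CMD12, CMD0, CMD15, and a CMD52 write to the
	 * CCCR ABORT register - the CMD52 register address sits in bits 25:9
	 * of the argument) are flagged as "stop" commands so they can be
	 * issued without waiting for a previous data transfer to finish.
	 */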
Seungwon Jeon90c21432013-08-31 00:14:05 +0900254 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
255 cmd->opcode == MMC_GO_IDLE_STATE ||
256 cmd->opcode == MMC_GO_INACTIVE_STATE ||
257 (cmd->opcode == SD_IO_RW_DIRECT &&
258 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
Will Newtonf95f3852011-01-02 01:11:59 -0500259 cmdr |= SDMMC_CMD_STOP;
260 else
Seungwon Jeon90c21432013-08-31 00:14:05 +0900261 if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
262 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
Will Newtonf95f3852011-01-02 01:11:59 -0500263
264 if (cmd->flags & MMC_RSP_PRESENT) {
265 /* We expect a response, so set this bit */
266 cmdr |= SDMMC_CMD_RESP_EXP;
267 if (cmd->flags & MMC_RSP_136)
268 cmdr |= SDMMC_CMD_RESP_LONG;
269 }
270
271 if (cmd->flags & MMC_RSP_CRC)
272 cmdr |= SDMMC_CMD_RESP_CRC;
273
274 data = cmd->data;
275 if (data) {
276 cmdr |= SDMMC_CMD_DAT_EXP;
277 if (data->flags & MMC_DATA_STREAM)
278 cmdr |= SDMMC_CMD_STRM_MODE;
279 if (data->flags & MMC_DATA_WRITE)
280 cmdr |= SDMMC_CMD_DAT_WR;
281 }
282
James Hogancb27a842012-10-16 09:43:08 +0100283 if (drv_data && drv_data->prepare_command)
284 drv_data->prepare_command(slot->host, &cmdr);
Thomas Abraham800d78b2012-09-17 18:16:42 +0000285
Will Newtonf95f3852011-01-02 01:11:59 -0500286 return cmdr;
287}
288
Seungwon Jeon90c21432013-08-31 00:14:05 +0900289static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
290{
291 struct mmc_command *stop;
292 u32 cmdr;
293
294 if (!cmd->data)
295 return 0;
296
297 stop = &host->stop_abort;
298 cmdr = cmd->opcode;
299 memset(stop, 0, sizeof(struct mmc_command));
300
301 if (cmdr == MMC_READ_SINGLE_BLOCK ||
302 cmdr == MMC_READ_MULTIPLE_BLOCK ||
303 cmdr == MMC_WRITE_BLOCK ||
304 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
305 stop->opcode = MMC_STOP_TRANSMISSION;
306 stop->arg = 0;
307 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
308 } else if (cmdr == SD_IO_RW_EXTENDED) {
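		/*
		 * Build a CMD52 write of the function number (taken from
		 * bits 30:28 of the original CMD53 argument) into the CCCR
		 * ABORT register of function 0 to abort the I/O transfer.
		 */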
309 stop->opcode = SD_IO_RW_DIRECT;
310 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
311 ((cmd->arg >> 28) & 0x7);
312 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
313 } else {
314 return 0;
315 }
316
317 cmdr = stop->opcode | SDMMC_CMD_STOP |
318 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
319
320 return cmdr;
321}
322
Will Newtonf95f3852011-01-02 01:11:59 -0500323static void dw_mci_start_command(struct dw_mci *host,
324 struct mmc_command *cmd, u32 cmd_flags)
325{
326 host->cmd = cmd;
Thomas Abraham4a909202012-09-17 18:16:35 +0000327 dev_vdbg(host->dev,
Will Newtonf95f3852011-01-02 01:11:59 -0500328 "start command: ARGR=0x%08x CMDR=0x%08x\n",
329 cmd->arg, cmd_flags);
330
331 mci_writel(host, CMDARG, cmd->arg);
332 wmb();
333
334 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
335}
336
Seungwon Jeon90c21432013-08-31 00:14:05 +0900337static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
Will Newtonf95f3852011-01-02 01:11:59 -0500338{
Seungwon Jeon90c21432013-08-31 00:14:05 +0900339 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
340 dw_mci_start_command(host, stop, host->stop_cmdr);
Will Newtonf95f3852011-01-02 01:11:59 -0500341}
342
343/* DMA interface functions */
344static void dw_mci_stop_dma(struct dw_mci *host)
345{
James Hogan03e8cb52011-06-29 09:28:43 +0100346 if (host->using_dma) {
Will Newtonf95f3852011-01-02 01:11:59 -0500347 host->dma_ops->stop(host);
348 host->dma_ops->cleanup(host);
Will Newtonf95f3852011-01-02 01:11:59 -0500349 }
Seungwon Jeonaa50f252013-08-31 00:14:38 +0900350
351 /* Data transfer was stopped by the interrupt handler */
352 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
Will Newtonf95f3852011-01-02 01:11:59 -0500353}
354
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900355static int dw_mci_get_dma_dir(struct mmc_data *data)
356{
357 if (data->flags & MMC_DATA_WRITE)
358 return DMA_TO_DEVICE;
359 else
360 return DMA_FROM_DEVICE;
361}
362
Jaehoon Chung9beee912012-02-16 11:19:38 +0900363#ifdef CONFIG_MMC_DW_IDMAC
Will Newtonf95f3852011-01-02 01:11:59 -0500364static void dw_mci_dma_cleanup(struct dw_mci *host)
365{
366 struct mmc_data *data = host->data;
367
368 if (data)
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900369 if (!data->host_cookie)
Thomas Abraham4a909202012-09-17 18:16:35 +0000370 dma_unmap_sg(host->dev,
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900371 data->sg,
372 data->sg_len,
373 dw_mci_get_dma_dir(data));
Will Newtonf95f3852011-01-02 01:11:59 -0500374}
375
Seungwon Jeon5ce9d962013-08-31 00:14:33 +0900376static void dw_mci_idmac_reset(struct dw_mci *host)
377{
378 u32 bmod = mci_readl(host, BMOD);
379 /* Software reset of DMA */
380 bmod |= SDMMC_IDMAC_SWRESET;
381 mci_writel(host, BMOD, bmod);
382}
383
Will Newtonf95f3852011-01-02 01:11:59 -0500384static void dw_mci_idmac_stop_dma(struct dw_mci *host)
385{
386 u32 temp;
387
388 /* Disable and reset the IDMAC interface */
389 temp = mci_readl(host, CTRL);
390 temp &= ~SDMMC_CTRL_USE_IDMAC;
391 temp |= SDMMC_CTRL_DMA_RESET;
392 mci_writel(host, CTRL, temp);
393
394 /* Stop the IDMAC running */
395 temp = mci_readl(host, BMOD);
Jaehoon Chunga5289a42011-02-25 11:08:13 +0900396 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
Seungwon Jeon5ce9d962013-08-31 00:14:33 +0900397 temp |= SDMMC_IDMAC_SWRESET;
Will Newtonf95f3852011-01-02 01:11:59 -0500398 mci_writel(host, BMOD, temp);
399}
400
401static void dw_mci_idmac_complete_dma(struct dw_mci *host)
402{
403 struct mmc_data *data = host->data;
404
Thomas Abraham4a909202012-09-17 18:16:35 +0000405 dev_vdbg(host->dev, "DMA complete\n");
Will Newtonf95f3852011-01-02 01:11:59 -0500406
407 host->dma_ops->cleanup(host);
408
409 /*
410 * If the card was removed, data will be NULL. No point in trying to
411 * send the stop command or waiting for NBUSY in this case.
412 */
413 if (data) {
414 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
415 tasklet_schedule(&host->tasklet);
416 }
417}
418
419static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
420 unsigned int sg_len)
421{
422 int i;
423 struct idmac_desc *desc = host->sg_cpu;
424
425 for (i = 0; i < sg_len; i++, desc++) {
426 unsigned int length = sg_dma_len(&data->sg[i]);
427 u32 mem_addr = sg_dma_address(&data->sg[i]);
428
429 /* Set the OWN bit and disable interrupts for this descriptor */
430 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
431
432 /* Buffer length */
433 IDMAC_SET_BUFFER1_SIZE(desc, length);
434
435 /* Physical address to DMA to/from */
436 desc->des2 = mem_addr;
437 }
438
439 /* Set first descriptor */
440 desc = host->sg_cpu;
441 desc->des0 |= IDMAC_DES0_FD;
442
443 /* Set last descriptor */
444 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
445 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
446 desc->des0 |= IDMAC_DES0_LD;
447
448 wmb();
449}
450
451static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
452{
453 u32 temp;
454
455 dw_mci_translate_sglist(host, host->data, sg_len);
456
457 /* Select IDMAC interface */
458 temp = mci_readl(host, CTRL);
459 temp |= SDMMC_CTRL_USE_IDMAC;
460 mci_writel(host, CTRL, temp);
461
462 wmb();
463
464 /* Enable the IDMAC */
465 temp = mci_readl(host, BMOD);
Jaehoon Chunga5289a42011-02-25 11:08:13 +0900466 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
Will Newtonf95f3852011-01-02 01:11:59 -0500467 mci_writel(host, BMOD, temp);
468
469 /* Start it running */
470 mci_writel(host, PLDMND, 1);
471}
472
473static int dw_mci_idmac_init(struct dw_mci *host)
474{
475 struct idmac_desc *p;
Seungwon Jeon897b69e2012-09-19 13:58:31 +0800476 int i;
Will Newtonf95f3852011-01-02 01:11:59 -0500477
478 /* Number of descriptors in the ring buffer */
479 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
480
481 /* Forward link the descriptor list */
482 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
483 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
484
485 /* Set the last descriptor as the end-of-ring descriptor */
486 p->des3 = host->sg_dma;
487 p->des0 = IDMAC_DES0_ER;
488
Seungwon Jeon5ce9d962013-08-31 00:14:33 +0900489 dw_mci_idmac_reset(host);
Seungwon Jeon141a7122012-05-22 13:01:03 +0900490
Will Newtonf95f3852011-01-02 01:11:59 -0500491 /* Mask out interrupts - get Tx & Rx complete only */
Joonyoung Shimfc79a4d2013-04-26 15:35:22 +0900492 mci_writel(host, IDSTS, IDMAC_INT_CLR);
Will Newtonf95f3852011-01-02 01:11:59 -0500493 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
494 SDMMC_IDMAC_INT_TI);
495
496 /* Set the descriptor base address */
497 mci_writel(host, DBADDR, host->sg_dma);
498 return 0;
499}
500
Arnd Bergmann8e2b36e2012-11-06 22:55:31 +0100501static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
Seungwon Jeon885c3e82012-02-20 11:01:43 +0900502 .init = dw_mci_idmac_init,
503 .start = dw_mci_idmac_start_dma,
504 .stop = dw_mci_idmac_stop_dma,
505 .complete = dw_mci_idmac_complete_dma,
506 .cleanup = dw_mci_dma_cleanup,
507};
508#endif /* CONFIG_MMC_DW_IDMAC */
509
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900510static int dw_mci_pre_dma_transfer(struct dw_mci *host,
511 struct mmc_data *data,
512 bool next)
Will Newtonf95f3852011-01-02 01:11:59 -0500513{
514 struct scatterlist *sg;
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900515 unsigned int i, sg_len;
Will Newtonf95f3852011-01-02 01:11:59 -0500516
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900517 if (!next && data->host_cookie)
518 return data->host_cookie;
Will Newtonf95f3852011-01-02 01:11:59 -0500519
520 /*
521 * We don't do DMA on "complex" transfers, i.e. with
522 * non-word-aligned buffers or lengths. Also, we don't bother
523 * with all the DMA setup overhead for short transfers.
524 */
525 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
526 return -EINVAL;
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900527
Will Newtonf95f3852011-01-02 01:11:59 -0500528 if (data->blksz & 3)
529 return -EINVAL;
530
531 for_each_sg(data->sg, sg, data->sg_len, i) {
532 if (sg->offset & 3 || sg->length & 3)
533 return -EINVAL;
534 }
535
Thomas Abraham4a909202012-09-17 18:16:35 +0000536 sg_len = dma_map_sg(host->dev,
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900537 data->sg,
538 data->sg_len,
539 dw_mci_get_dma_dir(data));
540 if (sg_len == 0)
541 return -EINVAL;
542
543 if (next)
544 data->host_cookie = sg_len;
545
546 return sg_len;
547}
548
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900549static void dw_mci_pre_req(struct mmc_host *mmc,
550 struct mmc_request *mrq,
551 bool is_first_req)
552{
553 struct dw_mci_slot *slot = mmc_priv(mmc);
554 struct mmc_data *data = mrq->data;
555
556 if (!slot->host->use_dma || !data)
557 return;
558
559 if (data->host_cookie) {
560 data->host_cookie = 0;
561 return;
562 }
563
564 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
565 data->host_cookie = 0;
566}
567
568static void dw_mci_post_req(struct mmc_host *mmc,
569 struct mmc_request *mrq,
570 int err)
571{
572 struct dw_mci_slot *slot = mmc_priv(mmc);
573 struct mmc_data *data = mrq->data;
574
575 if (!slot->host->use_dma || !data)
576 return;
577
578 if (data->host_cookie)
Thomas Abraham4a909202012-09-17 18:16:35 +0000579 dma_unmap_sg(slot->host->dev,
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900580 data->sg,
581 data->sg_len,
582 dw_mci_get_dma_dir(data));
583 data->host_cookie = 0;
584}
585
Seungwon Jeon52426892013-08-31 00:13:42 +0900586static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
587{
588#ifdef CONFIG_MMC_DW_IDMAC
589 unsigned int blksz = data->blksz;
590 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
591 u32 fifo_width = 1 << host->data_shift;
592 u32 blksz_depth = blksz / fifo_width, fifoth_val;
593 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
594 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
595
596 tx_wmark = (host->fifo_depth) / 2;
597 tx_wmark_invers = host->fifo_depth - tx_wmark;
598
	/*
	 * Use an MSIZE (burst size) of 1 transfer, with an RX watermark
	 * of 1, if blksz is not a multiple of the FIFO width.
	 */
603 if (blksz % fifo_width) {
604 msize = 0;
605 rx_wmark = 1;
606 goto done;
607 }
608
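	/*
	 * Otherwise pick the largest burst size that evenly divides both the
	 * block depth and the TX watermark complement, and set the RX
	 * watermark to that burst size minus one.
	 */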
609 do {
610 if (!((blksz_depth % mszs[idx]) ||
611 (tx_wmark_invers % mszs[idx]))) {
612 msize = idx;
613 rx_wmark = mszs[idx] - 1;
614 break;
615 }
616 } while (--idx > 0);
	/*
	 * If idx reaches '0', no burst size fits;
	 * in that case the initial values are used.
	 */
621done:
622 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
623 mci_writel(host, FIFOTH, fifoth_val);
624#endif
625}
626
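/*
 * Program the card read threshold (CDTHRCTL) so that, at HS200/SDR104 rates,
 * the CIU starts reading a block only when at least thld_size bytes of FIFO
 * space are free, preventing the card clock from being stopped in the middle
 * of a block; otherwise the threshold is disabled.
 */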
Seungwon Jeonf1d27362013-08-31 00:13:55 +0900627static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
628{
629 unsigned int blksz = data->blksz;
630 u32 blksz_depth, fifo_depth;
631 u16 thld_size;
632
633 WARN_ON(!(data->flags & MMC_DATA_READ));
634
635 if (host->timing != MMC_TIMING_MMC_HS200 &&
636 host->timing != MMC_TIMING_UHS_SDR104)
637 goto disable;
638
639 blksz_depth = blksz / (1 << host->data_shift);
640 fifo_depth = host->fifo_depth;
641
642 if (blksz_depth > fifo_depth)
643 goto disable;
644
	/*
	 * If blksz_depth >= fifo_depth / 2, any thld_size <= blksz is valid;
	 * if blksz_depth < fifo_depth / 2, thld_size must equal blksz.
	 * Simply use blksz in both cases.
	 */
650 thld_size = blksz;
651 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
652 return;
653
654disable:
655 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
656}
657
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900658static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
659{
660 int sg_len;
661 u32 temp;
662
663 host->using_dma = 0;
664
665 /* If we don't have a channel, we can't do DMA */
666 if (!host->use_dma)
667 return -ENODEV;
668
669 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
Seungwon Jeona99aa9b2012-04-10 09:53:32 +0900670 if (sg_len < 0) {
671 host->dma_ops->stop(host);
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900672 return sg_len;
Seungwon Jeona99aa9b2012-04-10 09:53:32 +0900673 }
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900674
James Hogan03e8cb52011-06-29 09:28:43 +0100675 host->using_dma = 1;
676
Thomas Abraham4a909202012-09-17 18:16:35 +0000677 dev_vdbg(host->dev,
Will Newtonf95f3852011-01-02 01:11:59 -0500678 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
679 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
680 sg_len);
681
	/*
	 * Decide the MSIZE and RX/TX watermark.
	 * If the current block size is the same as the previous one,
	 * there is no need to update FIFOTH.
	 */
687 if (host->prev_blksz != data->blksz)
688 dw_mci_adjust_fifoth(host, data);
689
Will Newtonf95f3852011-01-02 01:11:59 -0500690 /* Enable the DMA interface */
691 temp = mci_readl(host, CTRL);
692 temp |= SDMMC_CTRL_DMA_ENABLE;
693 mci_writel(host, CTRL, temp);
694
695 /* Disable RX/TX IRQs, let DMA handle it */
696 temp = mci_readl(host, INTMASK);
697 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
698 mci_writel(host, INTMASK, temp);
699
700 host->dma_ops->start(host, sg_len);
701
702 return 0;
703}
704
705static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
706{
707 u32 temp;
708
709 data->error = -EINPROGRESS;
710
711 WARN_ON(host->data);
712 host->sg = NULL;
713 host->data = data;
714
Seungwon Jeonf1d27362013-08-31 00:13:55 +0900715 if (data->flags & MMC_DATA_READ) {
James Hogan55c5efbc2011-06-29 09:29:58 +0100716 host->dir_status = DW_MCI_RECV_STATUS;
Seungwon Jeonf1d27362013-08-31 00:13:55 +0900717 dw_mci_ctrl_rd_thld(host, data);
718 } else {
James Hogan55c5efbc2011-06-29 09:29:58 +0100719 host->dir_status = DW_MCI_SEND_STATUS;
Seungwon Jeonf1d27362013-08-31 00:13:55 +0900720 }
James Hogan55c5efbc2011-06-29 09:29:58 +0100721
Will Newtonf95f3852011-01-02 01:11:59 -0500722 if (dw_mci_submit_data_dma(host, data)) {
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +0900723 int flags = SG_MITER_ATOMIC;
724 if (host->data->flags & MMC_DATA_READ)
725 flags |= SG_MITER_TO_SG;
726 else
727 flags |= SG_MITER_FROM_SG;
728
729 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
Will Newtonf95f3852011-01-02 01:11:59 -0500730 host->sg = data->sg;
James Hogan34b664a2011-06-24 13:57:56 +0100731 host->part_buf_start = 0;
732 host->part_buf_count = 0;
Will Newtonf95f3852011-01-02 01:11:59 -0500733
James Hoganb40af3a2011-06-24 13:54:06 +0100734 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
Will Newtonf95f3852011-01-02 01:11:59 -0500735 temp = mci_readl(host, INTMASK);
736 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
737 mci_writel(host, INTMASK, temp);
738
739 temp = mci_readl(host, CTRL);
740 temp &= ~SDMMC_CTRL_DMA_ENABLE;
741 mci_writel(host, CTRL, temp);
Seungwon Jeon52426892013-08-31 00:13:42 +0900742
		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If the next data may be transferred by DMA,
		 * prev_blksz should be invalidated.
		 */
748 mci_writel(host, FIFOTH, host->fifoth_val);
749 host->prev_blksz = 0;
750 } else {
751 /*
752 * Keep the current block size.
753 * It will be used to decide whether to update
754 * fifoth register next time.
755 */
756 host->prev_blksz = data->blksz;
Will Newtonf95f3852011-01-02 01:11:59 -0500757 }
758}
759
760static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
761{
762 struct dw_mci *host = slot->host;
763 unsigned long timeout = jiffies + msecs_to_jiffies(500);
764 unsigned int cmd_status = 0;
765
766 mci_writel(host, CMDARG, arg);
767 wmb();
768 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
769
770 while (time_before(jiffies, timeout)) {
771 cmd_status = mci_readl(host, CMD);
772 if (!(cmd_status & SDMMC_CMD_START))
773 return;
774 }
775 dev_err(&slot->mmc->class_dev,
776 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
777 cmd, arg, cmd_status);
778}
779
Abhilash Kesavanab269122012-11-19 10:26:21 +0530780static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
Will Newtonf95f3852011-01-02 01:11:59 -0500781{
782 struct dw_mci *host = slot->host;
Doug Andersonfdf492a2013-08-31 00:11:43 +0900783 unsigned int clock = slot->clock;
Will Newtonf95f3852011-01-02 01:11:59 -0500784 u32 div;
Doug Anderson9623b5b2012-07-25 08:33:17 -0700785 u32 clk_en_a;
Will Newtonf95f3852011-01-02 01:11:59 -0500786
Doug Andersonfdf492a2013-08-31 00:11:43 +0900787 if (!clock) {
788 mci_writel(host, CLKENA, 0);
789 mci_send_cmd(slot,
790 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
791 } else if (clock != host->current_speed || force_clkinit) {
792 div = host->bus_hz / clock;
793 if (host->bus_hz % clock && host->bus_hz > clock)
Will Newtonf95f3852011-01-02 01:11:59 -0500794 /*
795 * move the + 1 after the divide to prevent
796 * over-clocking the card.
797 */
Seungwon Jeone4199902012-05-22 13:01:21 +0900798 div += 1;
799
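		/*
		 * CLKDIV stores half of the actual divider (cclk_out =
		 * bus_hz / (2 * CLKDIV)); a value of 0 means the bus clock
		 * is passed through undivided.
		 */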
Doug Andersonfdf492a2013-08-31 00:11:43 +0900800 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
Will Newtonf95f3852011-01-02 01:11:59 -0500801
Doug Andersonfdf492a2013-08-31 00:11:43 +0900802 if ((clock << div) != slot->__clk_old || force_clkinit)
803 dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
805 slot->id, host->bus_hz, clock,
806 div ? ((host->bus_hz / div) >> 1) :
807 host->bus_hz, div);
Will Newtonf95f3852011-01-02 01:11:59 -0500808
809 /* disable clock */
810 mci_writel(host, CLKENA, 0);
811 mci_writel(host, CLKSRC, 0);
812
813 /* inform CIU */
814 mci_send_cmd(slot,
815 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
816
817 /* set clock to desired speed */
818 mci_writel(host, CLKDIV, div);
819
820 /* inform CIU */
821 mci_send_cmd(slot,
822 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
823
Doug Anderson9623b5b2012-07-25 08:33:17 -0700824 /* enable clock; only low power if no SDIO */
825 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
826 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
827 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
828 mci_writel(host, CLKENA, clk_en_a);
Will Newtonf95f3852011-01-02 01:11:59 -0500829
830 /* inform CIU */
831 mci_send_cmd(slot,
832 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
833
		/* remember the requested clock together with the clock divider */
835 slot->__clk_old = clock << div;
Will Newtonf95f3852011-01-02 01:11:59 -0500836 }
837
Doug Andersonfdf492a2013-08-31 00:11:43 +0900838 host->current_speed = clock;
839
Will Newtonf95f3852011-01-02 01:11:59 -0500840 /* Set the current slot bus width */
Seungwon Jeon1d56c452011-06-20 17:23:53 +0900841 mci_writel(host, CTYPE, (slot->ctype << slot->id));
Will Newtonf95f3852011-01-02 01:11:59 -0500842}
843
Seungwon Jeon053b3ce2011-12-22 18:01:29 +0900844static void __dw_mci_start_request(struct dw_mci *host,
845 struct dw_mci_slot *slot,
846 struct mmc_command *cmd)
Will Newtonf95f3852011-01-02 01:11:59 -0500847{
848 struct mmc_request *mrq;
Will Newtonf95f3852011-01-02 01:11:59 -0500849 struct mmc_data *data;
850 u32 cmdflags;
851
852 mrq = slot->mrq;
853 if (host->pdata->select_slot)
854 host->pdata->select_slot(slot->id);
855
Will Newtonf95f3852011-01-02 01:11:59 -0500856 host->cur_slot = slot;
857 host->mrq = mrq;
858
859 host->pending_events = 0;
860 host->completed_events = 0;
Seungwon Jeone352c812013-08-31 00:14:17 +0900861 host->cmd_status = 0;
Will Newtonf95f3852011-01-02 01:11:59 -0500862 host->data_status = 0;
Seungwon Jeone352c812013-08-31 00:14:17 +0900863 host->dir_status = 0;
Will Newtonf95f3852011-01-02 01:11:59 -0500864
Seungwon Jeon053b3ce2011-12-22 18:01:29 +0900865 data = cmd->data;
Will Newtonf95f3852011-01-02 01:11:59 -0500866 if (data) {
867 dw_mci_set_timeout(host);
868 mci_writel(host, BYTCNT, data->blksz*data->blocks);
869 mci_writel(host, BLKSIZ, data->blksz);
870 }
871
Will Newtonf95f3852011-01-02 01:11:59 -0500872 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
873
874 /* this is the first command, send the initialization clock */
875 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
876 cmdflags |= SDMMC_CMD_INIT;
877
878 if (data) {
879 dw_mci_submit_data(host, data);
880 wmb();
881 }
882
883 dw_mci_start_command(host, cmd, cmdflags);
884
885 if (mrq->stop)
886 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
Seungwon Jeon90c21432013-08-31 00:14:05 +0900887 else
888 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
Will Newtonf95f3852011-01-02 01:11:59 -0500889}
890
Seungwon Jeon053b3ce2011-12-22 18:01:29 +0900891static void dw_mci_start_request(struct dw_mci *host,
892 struct dw_mci_slot *slot)
893{
894 struct mmc_request *mrq = slot->mrq;
895 struct mmc_command *cmd;
896
897 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
898 __dw_mci_start_request(host, slot, cmd);
899}
900
James Hogan7456caa2011-06-24 13:55:10 +0100901/* must be called with host->lock held */
Will Newtonf95f3852011-01-02 01:11:59 -0500902static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
903 struct mmc_request *mrq)
904{
905 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
906 host->state);
907
Will Newtonf95f3852011-01-02 01:11:59 -0500908 slot->mrq = mrq;
909
910 if (host->state == STATE_IDLE) {
911 host->state = STATE_SENDING_CMD;
912 dw_mci_start_request(host, slot);
913 } else {
914 list_add_tail(&slot->queue_node, &host->queue);
915 }
Will Newtonf95f3852011-01-02 01:11:59 -0500916}
917
918static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
919{
920 struct dw_mci_slot *slot = mmc_priv(mmc);
921 struct dw_mci *host = slot->host;
922
923 WARN_ON(slot->mrq);
924
James Hogan7456caa2011-06-24 13:55:10 +0100925 /*
926 * The check for card presence and queueing of the request must be
927 * atomic, otherwise the card could be removed in between and the
928 * request wouldn't fail until another card was inserted.
929 */
930 spin_lock_bh(&host->lock);
931
Will Newtonf95f3852011-01-02 01:11:59 -0500932 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
James Hogan7456caa2011-06-24 13:55:10 +0100933 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -0500934 mrq->cmd->error = -ENOMEDIUM;
935 mmc_request_done(mmc, mrq);
936 return;
937 }
938
Will Newtonf95f3852011-01-02 01:11:59 -0500939 dw_mci_queue_request(host, slot, mrq);
James Hogan7456caa2011-06-24 13:55:10 +0100940
941 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -0500942}
943
944static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
945{
946 struct dw_mci_slot *slot = mmc_priv(mmc);
Arnd Bergmanne95baf12012-11-08 14:26:11 +0000947 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
Jaehoon Chung41babf72011-02-24 13:46:11 +0900948 u32 regs;
Will Newtonf95f3852011-01-02 01:11:59 -0500949
Will Newtonf95f3852011-01-02 01:11:59 -0500950 switch (ios->bus_width) {
Will Newtonf95f3852011-01-02 01:11:59 -0500951 case MMC_BUS_WIDTH_4:
952 slot->ctype = SDMMC_CTYPE_4BIT;
953 break;
Jaehoon Chungc9b2a062011-02-17 16:12:38 +0900954 case MMC_BUS_WIDTH_8:
955 slot->ctype = SDMMC_CTYPE_8BIT;
956 break;
Jaehoon Chungb2f7cb42012-11-08 17:35:31 +0900957 default:
958 /* set default 1 bit mode */
959 slot->ctype = SDMMC_CTYPE_1BIT;
Will Newtonf95f3852011-01-02 01:11:59 -0500960 }
961
Seungwon Jeon3f514292012-01-02 16:00:02 +0900962 regs = mci_readl(slot->host, UHS_REG);
963
Jaehoon Chung41babf72011-02-24 13:46:11 +0900964 /* DDR mode set */
Seungwon Jeon3f514292012-01-02 16:00:02 +0900965 if (ios->timing == MMC_TIMING_UHS_DDR50)
Hyeonsu Kimc69042a2013-02-22 09:32:46 +0900966 regs |= ((0x1 << slot->id) << 16);
Seungwon Jeon3f514292012-01-02 16:00:02 +0900967 else
Hyeonsu Kimc69042a2013-02-22 09:32:46 +0900968 regs &= ~((0x1 << slot->id) << 16);
Seungwon Jeon3f514292012-01-02 16:00:02 +0900969
970 mci_writel(slot->host, UHS_REG, regs);
Seungwon Jeonf1d27362013-08-31 00:13:55 +0900971 slot->host->timing = ios->timing;
Jaehoon Chung41babf72011-02-24 13:46:11 +0900972
Doug Andersonfdf492a2013-08-31 00:11:43 +0900973 /*
974 * Use mirror of ios->clock to prevent race with mmc
975 * core ios update when finding the minimum.
976 */
977 slot->clock = ios->clock;
Will Newtonf95f3852011-01-02 01:11:59 -0500978
James Hogancb27a842012-10-16 09:43:08 +0100979 if (drv_data && drv_data->set_ios)
980 drv_data->set_ios(slot->host, ios);
Thomas Abraham800d78b2012-09-17 18:16:42 +0000981
Jaehoon Chungbf7cb222012-11-08 17:35:29 +0900982 /* Slot specific timing and width adjustment */
983 dw_mci_setup_bus(slot, false);
984
Will Newtonf95f3852011-01-02 01:11:59 -0500985 switch (ios->power_mode) {
986 case MMC_POWER_UP:
987 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
James Hogane6f34e22013-03-12 10:43:32 +0000988 /* Power up slot */
989 if (slot->host->pdata->setpower)
990 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
Jaehoon Chung4366dcc2013-03-26 21:36:14 +0900991 regs = mci_readl(slot->host, PWREN);
992 regs |= (1 << slot->id);
993 mci_writel(slot->host, PWREN, regs);
James Hogane6f34e22013-03-12 10:43:32 +0000994 break;
995 case MMC_POWER_OFF:
996 /* Power down slot */
997 if (slot->host->pdata->setpower)
998 slot->host->pdata->setpower(slot->id, 0);
Jaehoon Chung4366dcc2013-03-26 21:36:14 +0900999 regs = mci_readl(slot->host, PWREN);
1000 regs &= ~(1 << slot->id);
1001 mci_writel(slot->host, PWREN, regs);
Will Newtonf95f3852011-01-02 01:11:59 -05001002 break;
1003 default:
1004 break;
1005 }
1006}
1007
1008static int dw_mci_get_ro(struct mmc_host *mmc)
1009{
1010 int read_only;
1011 struct dw_mci_slot *slot = mmc_priv(mmc);
1012 struct dw_mci_board *brd = slot->host->pdata;
1013
	/* Use platform get_ro function, else try on-board write protect */
Doug Anderson96406392013-01-11 17:03:54 +00001015 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
Thomas Abrahamb4967aa2012-09-17 18:16:39 +00001016 read_only = 0;
1017 else if (brd->get_ro)
Will Newtonf95f3852011-01-02 01:11:59 -05001018 read_only = brd->get_ro(slot->id);
Doug Anderson55a6ceb2013-01-11 17:03:53 +00001019 else if (gpio_is_valid(slot->wp_gpio))
1020 read_only = gpio_get_value(slot->wp_gpio);
Will Newtonf95f3852011-01-02 01:11:59 -05001021 else
1022 read_only =
1023 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1024
1025 dev_dbg(&mmc->class_dev, "card is %s\n",
1026 read_only ? "read-only" : "read-write");
1027
1028 return read_only;
1029}
1030
1031static int dw_mci_get_cd(struct mmc_host *mmc)
1032{
1033 int present;
1034 struct dw_mci_slot *slot = mmc_priv(mmc);
1035 struct dw_mci_board *brd = slot->host->pdata;
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001036 struct dw_mci *host = slot->host;
1037 int gpio_cd = mmc_gpio_get_cd(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -05001038
1039 /* Use platform get_cd function, else try onboard card detect */
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09001040 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1041 present = 1;
1042 else if (brd->get_cd)
Will Newtonf95f3852011-01-02 01:11:59 -05001043 present = !brd->get_cd(slot->id);
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001044 else if (!IS_ERR_VALUE(gpio_cd))
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001045 present = gpio_cd;
Will Newtonf95f3852011-01-02 01:11:59 -05001046 else
1047 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1048 == 0 ? 1 : 0;
1049
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001050 spin_lock_bh(&host->lock);
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001051 if (present) {
1052 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
Will Newtonf95f3852011-01-02 01:11:59 -05001053 dev_dbg(&mmc->class_dev, "card is present\n");
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001054 } else {
1055 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
Will Newtonf95f3852011-01-02 01:11:59 -05001056 dev_dbg(&mmc->class_dev, "card is not present\n");
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001057 }
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001058 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -05001059
1060 return present;
1061}
1062
Doug Anderson9623b5b2012-07-25 08:33:17 -07001063/*
 * Disable low power mode.
1065 *
1066 * Low power mode will stop the card clock when idle. According to the
1067 * description of the CLKENA register we should disable low power mode
1068 * for SDIO cards if we need SDIO interrupts to work.
1069 *
1070 * This function is fast if low power mode is already disabled.
1071 */
1072static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1073{
1074 struct dw_mci *host = slot->host;
1075 u32 clk_en_a;
1076 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1077
1078 clk_en_a = mci_readl(host, CLKENA);
1079
1080 if (clk_en_a & clken_low_pwr) {
1081 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1082 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1083 SDMMC_CMD_PRV_DAT_WAIT, 0);
1084 }
1085}
1086
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301087static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1088{
1089 struct dw_mci_slot *slot = mmc_priv(mmc);
1090 struct dw_mci *host = slot->host;
1091 u32 int_mask;
1092
1093 /* Enable/disable Slot Specific SDIO interrupt */
1094 int_mask = mci_readl(host, INTMASK);
1095 if (enb) {
Doug Anderson9623b5b2012-07-25 08:33:17 -07001096 /*
1097 * Turn off low power mode if it was enabled. This is a bit of
1098 * a heavy operation and we disable / enable IRQs a lot, so
1099 * we'll leave low power mode disabled and it will get
1100 * re-enabled again in dw_mci_setup_bus().
1101 */
1102 dw_mci_disable_low_power(slot);
1103
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301104 mci_writel(host, INTMASK,
Kyoungil Kim705ad042012-05-14 17:38:48 +09001105 (int_mask | SDMMC_INT_SDIO(slot->id)));
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301106 } else {
1107 mci_writel(host, INTMASK,
Kyoungil Kim705ad042012-05-14 17:38:48 +09001108 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301109 }
1110}
1111
Seungwon Jeon0976f162013-08-31 00:12:42 +09001112static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1113{
1114 struct dw_mci_slot *slot = mmc_priv(mmc);
1115 struct dw_mci *host = slot->host;
1116 const struct dw_mci_drv_data *drv_data = host->drv_data;
1117 struct dw_mci_tuning_data tuning_data;
1118 int err = -ENOSYS;
1119
1120 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1121 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1122 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1123 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1124 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1125 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1126 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1127 } else {
1128 return -EINVAL;
1129 }
1130 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1131 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1132 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1133 } else {
1134 dev_err(host->dev,
1135 "Undefined command(%d) for tuning\n", opcode);
1136 return -EINVAL;
1137 }
1138
1139 if (drv_data && drv_data->execute_tuning)
1140 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1141 return err;
1142}
1143
Will Newtonf95f3852011-01-02 01:11:59 -05001144static const struct mmc_host_ops dw_mci_ops = {
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301145 .request = dw_mci_request,
Seungwon Jeon9aa51402012-02-06 16:55:07 +09001146 .pre_req = dw_mci_pre_req,
1147 .post_req = dw_mci_post_req,
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301148 .set_ios = dw_mci_set_ios,
1149 .get_ro = dw_mci_get_ro,
1150 .get_cd = dw_mci_get_cd,
1151 .enable_sdio_irq = dw_mci_enable_sdio_irq,
Seungwon Jeon0976f162013-08-31 00:12:42 +09001152 .execute_tuning = dw_mci_execute_tuning,
Will Newtonf95f3852011-01-02 01:11:59 -05001153};
1154
1155static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1156 __releases(&host->lock)
1157 __acquires(&host->lock)
1158{
1159 struct dw_mci_slot *slot;
1160 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1161
1162 WARN_ON(host->cmd || host->data);
1163
1164 host->cur_slot->mrq = NULL;
1165 host->mrq = NULL;
1166 if (!list_empty(&host->queue)) {
1167 slot = list_entry(host->queue.next,
1168 struct dw_mci_slot, queue_node);
1169 list_del(&slot->queue_node);
Thomas Abraham4a909202012-09-17 18:16:35 +00001170 dev_vdbg(host->dev, "list not empty: %s is next\n",
Will Newtonf95f3852011-01-02 01:11:59 -05001171 mmc_hostname(slot->mmc));
1172 host->state = STATE_SENDING_CMD;
1173 dw_mci_start_request(host, slot);
1174 } else {
Thomas Abraham4a909202012-09-17 18:16:35 +00001175 dev_vdbg(host->dev, "list empty\n");
Will Newtonf95f3852011-01-02 01:11:59 -05001176 host->state = STATE_IDLE;
1177 }
1178
1179 spin_unlock(&host->lock);
1180 mmc_request_done(prev_mmc, mrq);
1181 spin_lock(&host->lock);
1182}
1183
Seungwon Jeone352c812013-08-31 00:14:17 +09001184static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
Will Newtonf95f3852011-01-02 01:11:59 -05001185{
1186 u32 status = host->cmd_status;
1187
1188 host->cmd_status = 0;
1189
1190 /* Read the response from the card (up to 16 bytes) */
1191 if (cmd->flags & MMC_RSP_PRESENT) {
1192 if (cmd->flags & MMC_RSP_136) {
1193 cmd->resp[3] = mci_readl(host, RESP0);
1194 cmd->resp[2] = mci_readl(host, RESP1);
1195 cmd->resp[1] = mci_readl(host, RESP2);
1196 cmd->resp[0] = mci_readl(host, RESP3);
1197 } else {
1198 cmd->resp[0] = mci_readl(host, RESP0);
1199 cmd->resp[1] = 0;
1200 cmd->resp[2] = 0;
1201 cmd->resp[3] = 0;
1202 }
1203 }
1204
1205 if (status & SDMMC_INT_RTO)
1206 cmd->error = -ETIMEDOUT;
1207 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1208 cmd->error = -EILSEQ;
1209 else if (status & SDMMC_INT_RESP_ERR)
1210 cmd->error = -EIO;
1211 else
1212 cmd->error = 0;
1213
1214 if (cmd->error) {
1215 /* newer ip versions need a delay between retries */
1216 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1217 mdelay(20);
Will Newtonf95f3852011-01-02 01:11:59 -05001218 }
Seungwon Jeone352c812013-08-31 00:14:17 +09001219
1220 return cmd->error;
1221}
1222
1223static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1224{
Seungwon Jeon31bff452013-08-31 00:14:23 +09001225 u32 status = host->data_status;
Seungwon Jeone352c812013-08-31 00:14:17 +09001226
1227 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1228 if (status & SDMMC_INT_DRTO) {
1229 data->error = -ETIMEDOUT;
1230 } else if (status & SDMMC_INT_DCRC) {
1231 data->error = -EILSEQ;
1232 } else if (status & SDMMC_INT_EBE) {
1233 if (host->dir_status ==
1234 DW_MCI_SEND_STATUS) {
1235 /*
1236 * No data CRC status was returned.
1237 * The number of bytes transferred
1238 * will be exaggerated in PIO mode.
1239 */
1240 data->bytes_xfered = 0;
1241 data->error = -ETIMEDOUT;
1242 } else if (host->dir_status ==
1243 DW_MCI_RECV_STATUS) {
1244 data->error = -EIO;
1245 }
1246 } else {
1247 /* SDMMC_INT_SBE is included */
1248 data->error = -EIO;
1249 }
1250
1251 dev_err(host->dev, "data error, status 0x%08x\n", status);
1252
1253 /*
1254 * After an error, there may be data lingering
Seungwon Jeon31bff452013-08-31 00:14:23 +09001255 * in the FIFO
Seungwon Jeone352c812013-08-31 00:14:17 +09001256 */
Seungwon Jeon31bff452013-08-31 00:14:23 +09001257 dw_mci_fifo_reset(host);
Seungwon Jeone352c812013-08-31 00:14:17 +09001258 } else {
1259 data->bytes_xfered = data->blocks * data->blksz;
1260 data->error = 0;
1261 }
1262
1263 return data->error;
Will Newtonf95f3852011-01-02 01:11:59 -05001264}
1265
1266static void dw_mci_tasklet_func(unsigned long priv)
1267{
1268 struct dw_mci *host = (struct dw_mci *)priv;
1269 struct mmc_data *data;
1270 struct mmc_command *cmd;
Seungwon Jeone352c812013-08-31 00:14:17 +09001271 struct mmc_request *mrq;
Will Newtonf95f3852011-01-02 01:11:59 -05001272 enum dw_mci_state state;
1273 enum dw_mci_state prev_state;
Seungwon Jeone352c812013-08-31 00:14:17 +09001274 unsigned int err;
Will Newtonf95f3852011-01-02 01:11:59 -05001275
1276 spin_lock(&host->lock);
1277
1278 state = host->state;
1279 data = host->data;
Seungwon Jeone352c812013-08-31 00:14:17 +09001280 mrq = host->mrq;
Will Newtonf95f3852011-01-02 01:11:59 -05001281
1282 do {
1283 prev_state = state;
1284
1285 switch (state) {
1286 case STATE_IDLE:
1287 break;
1288
1289 case STATE_SENDING_CMD:
1290 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1291 &host->pending_events))
1292 break;
1293
1294 cmd = host->cmd;
1295 host->cmd = NULL;
1296 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
Seungwon Jeone352c812013-08-31 00:14:17 +09001297 err = dw_mci_command_complete(host, cmd);
1298 if (cmd == mrq->sbc && !err) {
Seungwon Jeon053b3ce2011-12-22 18:01:29 +09001299 prev_state = state = STATE_SENDING_CMD;
1300 __dw_mci_start_request(host, host->cur_slot,
Seungwon Jeone352c812013-08-31 00:14:17 +09001301 mrq->cmd);
Seungwon Jeon053b3ce2011-12-22 18:01:29 +09001302 goto unlock;
1303 }
1304
Seungwon Jeone352c812013-08-31 00:14:17 +09001305 if (cmd->data && err) {
Seungwon Jeon71abb132013-08-31 00:13:59 +09001306 dw_mci_stop_dma(host);
Seungwon Jeon90c21432013-08-31 00:14:05 +09001307 send_stop_abort(host, data);
1308 state = STATE_SENDING_STOP;
1309 break;
Seungwon Jeon71abb132013-08-31 00:13:59 +09001310 }
1311
Seungwon Jeone352c812013-08-31 00:14:17 +09001312 if (!cmd->data || err) {
1313 dw_mci_request_end(host, mrq);
Will Newtonf95f3852011-01-02 01:11:59 -05001314 goto unlock;
1315 }
1316
1317 prev_state = state = STATE_SENDING_DATA;
1318 /* fall through */
1319
1320 case STATE_SENDING_DATA:
1321 if (test_and_clear_bit(EVENT_DATA_ERROR,
1322 &host->pending_events)) {
1323 dw_mci_stop_dma(host);
Seungwon Jeon90c21432013-08-31 00:14:05 +09001324 send_stop_abort(host, data);
Will Newtonf95f3852011-01-02 01:11:59 -05001325 state = STATE_DATA_ERROR;
1326 break;
1327 }
1328
1329 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1330 &host->pending_events))
1331 break;
1332
1333 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1334 prev_state = state = STATE_DATA_BUSY;
1335 /* fall through */
1336
1337 case STATE_DATA_BUSY:
1338 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1339 &host->pending_events))
1340 break;
1341
1342 host->data = NULL;
1343 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
Seungwon Jeone352c812013-08-31 00:14:17 +09001344 err = dw_mci_data_complete(host, data);
Will Newtonf95f3852011-01-02 01:11:59 -05001345
Seungwon Jeone352c812013-08-31 00:14:17 +09001346 if (!err) {
1347 if (!data->stop || mrq->sbc) {
Sachin Kamat17c8bc82014-02-25 15:18:28 +05301348 if (mrq->sbc && data->stop)
Seungwon Jeone352c812013-08-31 00:14:17 +09001349 data->stop->error = 0;
1350 dw_mci_request_end(host, mrq);
1351 goto unlock;
Will Newtonf95f3852011-01-02 01:11:59 -05001352 }
Will Newtonf95f3852011-01-02 01:11:59 -05001353
				/* stop command for an open-ended transfer */
Seungwon Jeone352c812013-08-31 00:14:17 +09001355 if (data->stop)
1356 send_stop_abort(host, data);
Seungwon Jeon90c21432013-08-31 00:14:05 +09001357 }
Seungwon Jeone352c812013-08-31 00:14:17 +09001358
			/*
			 * If err is non-zero, the stop/abort command
			 * has already been issued.
			 */
1363 prev_state = state = STATE_SENDING_STOP;
1364
Will Newtonf95f3852011-01-02 01:11:59 -05001365 /* fall through */
1366
1367 case STATE_SENDING_STOP:
1368 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1369 &host->pending_events))
1370 break;
1371
Seungwon Jeon71abb132013-08-31 00:13:59 +09001372 /* CMD error in data command */
Seungwon Jeon31bff452013-08-31 00:14:23 +09001373 if (mrq->cmd->error && mrq->data)
1374 dw_mci_fifo_reset(host);
Seungwon Jeon71abb132013-08-31 00:13:59 +09001375
Will Newtonf95f3852011-01-02 01:11:59 -05001376 host->cmd = NULL;
Seungwon Jeon71abb132013-08-31 00:13:59 +09001377 host->data = NULL;
Seungwon Jeon90c21432013-08-31 00:14:05 +09001378
Seungwon Jeone352c812013-08-31 00:14:17 +09001379 if (mrq->stop)
1380 dw_mci_command_complete(host, mrq->stop);
Seungwon Jeon90c21432013-08-31 00:14:05 +09001381 else
1382 host->cmd_status = 0;
1383
Seungwon Jeone352c812013-08-31 00:14:17 +09001384 dw_mci_request_end(host, mrq);
Will Newtonf95f3852011-01-02 01:11:59 -05001385 goto unlock;
1386
1387 case STATE_DATA_ERROR:
1388 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1389 &host->pending_events))
1390 break;
1391
1392 state = STATE_DATA_BUSY;
1393 break;
1394 }
1395 } while (state != prev_state);
1396
1397 host->state = state;
1398unlock:
1399 spin_unlock(&host->lock);
1400
1401}
1402
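/*
 * PIO helpers: the data FIFO is always accessed one full register width at a
 * time (2, 4 or 8 bytes depending on host->data_shift), so part_buf carries
 * the leftover partial word between calls when a scatter-gather segment does
 * not end on a register-width boundary.
 */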
James Hogan34b664a2011-06-24 13:57:56 +01001403/* push final bytes to part_buf, only use during push */
1404static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1405{
1406 memcpy((void *)&host->part_buf, buf, cnt);
1407 host->part_buf_count = cnt;
1408}
1409
1410/* append bytes to part_buf, only use during push */
1411static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1412{
1413 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1414 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1415 host->part_buf_count += cnt;
1416 return cnt;
1417}
1418
1419/* pull first bytes from part_buf, only use during pull */
1420static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1421{
1422 cnt = min(cnt, (int)host->part_buf_count);
1423 if (cnt) {
1424 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1425 cnt);
1426 host->part_buf_count -= cnt;
1427 host->part_buf_start += cnt;
1428 }
1429 return cnt;
1430}
1431
1432/* pull final bytes from the part_buf, assuming it's just been filled */
1433static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1434{
1435 memcpy(buf, &host->part_buf, cnt);
1436 host->part_buf_start = cnt;
1437 host->part_buf_count = (1 << host->data_shift) - cnt;
1438}
1439
Will Newtonf95f3852011-01-02 01:11:59 -05001440static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1441{
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001442 struct mmc_data *data = host->data;
1443 int init_cnt = cnt;
1444
James Hogan34b664a2011-06-24 13:57:56 +01001445 /* try and push anything in the part_buf */
1446 if (unlikely(host->part_buf_count)) {
1447 int len = dw_mci_push_part_bytes(host, buf, cnt);
1448 buf += len;
1449 cnt -= len;
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001450 if (host->part_buf_count == 2) {
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001451 mci_writew(host, DATA(host->data_offset),
1452 host->part_buf16);
James Hogan34b664a2011-06-24 13:57:56 +01001453 host->part_buf_count = 0;
1454 }
1455 }
1456#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1457 if (unlikely((unsigned long)buf & 0x1)) {
1458 while (cnt >= 2) {
1459 u16 aligned_buf[64];
1460 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1461 int items = len >> 1;
1462 int i;
1463 /* memcpy from input buffer into aligned buffer */
1464 memcpy(aligned_buf, buf, len);
1465 buf += len;
1466 cnt -= len;
1467 /* push data from aligned buffer into fifo */
1468 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001469 mci_writew(host, DATA(host->data_offset),
1470 aligned_buf[i]);
James Hogan34b664a2011-06-24 13:57:56 +01001471 }
1472 } else
1473#endif
1474 {
1475 u16 *pdata = buf;
1476 for (; cnt >= 2; cnt -= 2)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001477 mci_writew(host, DATA(host->data_offset), *pdata++);
James Hogan34b664a2011-06-24 13:57:56 +01001478 buf = pdata;
1479 }
1480 /* put anything remaining in the part_buf */
1481 if (cnt) {
1482 dw_mci_set_part_bytes(host, buf, cnt);
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001483 /* Push data if we have reached the expected data length */
1484 if ((data->bytes_xfered + init_cnt) ==
1485 (data->blksz * data->blocks))
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001486 mci_writew(host, DATA(host->data_offset),
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001487 host->part_buf16);
Will Newtonf95f3852011-01-02 01:11:59 -05001488 }
1489}
1490
1491static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1492{
James Hogan34b664a2011-06-24 13:57:56 +01001493#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1494 if (unlikely((unsigned long)buf & 0x1)) {
1495 while (cnt >= 2) {
1496 /* pull data from fifo into aligned buffer */
1497 u16 aligned_buf[64];
1498 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1499 int items = len >> 1;
1500 int i;
1501 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001502 aligned_buf[i] = mci_readw(host,
1503 DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001504 /* memcpy from aligned buffer into output buffer */
1505 memcpy(buf, aligned_buf, len);
1506 buf += len;
1507 cnt -= len;
1508 }
1509 } else
1510#endif
1511 {
1512 u16 *pdata = buf;
1513 for (; cnt >= 2; cnt -= 2)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001514 *pdata++ = mci_readw(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001515 buf = pdata;
1516 }
1517 if (cnt) {
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001518 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001519 dw_mci_pull_final_bytes(host, buf, cnt);
Will Newtonf95f3852011-01-02 01:11:59 -05001520 }
1521}
1522
1523static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1524{
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001525 struct mmc_data *data = host->data;
1526 int init_cnt = cnt;
1527
James Hogan34b664a2011-06-24 13:57:56 +01001528 /* try and push anything in the part_buf */
1529 if (unlikely(host->part_buf_count)) {
1530 int len = dw_mci_push_part_bytes(host, buf, cnt);
1531 buf += len;
1532 cnt -= len;
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001533 if (host->part_buf_count == 4) {
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001534 mci_writel(host, DATA(host->data_offset),
1535 host->part_buf32);
James Hogan34b664a2011-06-24 13:57:56 +01001536 host->part_buf_count = 0;
1537 }
1538 }
1539#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1540 if (unlikely((unsigned long)buf & 0x3)) {
1541 while (cnt >= 4) {
1542 u32 aligned_buf[32];
1543 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1544 int items = len >> 2;
1545 int i;
1546 /* memcpy from input buffer into aligned buffer */
1547 memcpy(aligned_buf, buf, len);
1548 buf += len;
1549 cnt -= len;
1550 /* push data from aligned buffer into fifo */
1551 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001552 mci_writel(host, DATA(host->data_offset),
1553 aligned_buf[i]);
James Hogan34b664a2011-06-24 13:57:56 +01001554 }
1555 } else
1556#endif
1557 {
1558 u32 *pdata = buf;
1559 for (; cnt >= 4; cnt -= 4)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001560 mci_writel(host, DATA(host->data_offset), *pdata++);
James Hogan34b664a2011-06-24 13:57:56 +01001561 buf = pdata;
1562 }
1563 /* put anything remaining in the part_buf */
1564 if (cnt) {
1565 dw_mci_set_part_bytes(host, buf, cnt);
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001566 /* Push data if we have reached the expected data length */
1567 if ((data->bytes_xfered + init_cnt) ==
1568 (data->blksz * data->blocks))
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001569 mci_writel(host, DATA(host->data_offset),
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001570 host->part_buf32);
Will Newtonf95f3852011-01-02 01:11:59 -05001571 }
1572}
1573
1574static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1575{
James Hogan34b664a2011-06-24 13:57:56 +01001576#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1577 if (unlikely((unsigned long)buf & 0x3)) {
1578 while (cnt >= 4) {
1579 /* pull data from fifo into aligned buffer */
1580 u32 aligned_buf[32];
1581 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1582 int items = len >> 2;
1583 int i;
1584 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001585 aligned_buf[i] = mci_readl(host,
1586 DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001587 /* memcpy from aligned buffer into output buffer */
1588 memcpy(buf, aligned_buf, len);
1589 buf += len;
1590 cnt -= len;
1591 }
1592 } else
1593#endif
1594 {
1595 u32 *pdata = buf;
1596 for (; cnt >= 4; cnt -= 4)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001597 *pdata++ = mci_readl(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001598 buf = pdata;
1599 }
1600 if (cnt) {
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001601 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001602 dw_mci_pull_final_bytes(host, buf, cnt);
Will Newtonf95f3852011-01-02 01:11:59 -05001603 }
1604}
1605
1606static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1607{
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001608 struct mmc_data *data = host->data;
1609 int init_cnt = cnt;
1610
James Hogan34b664a2011-06-24 13:57:56 +01001611 /* try and push anything in the part_buf */
1612 if (unlikely(host->part_buf_count)) {
1613 int len = dw_mci_push_part_bytes(host, buf, cnt);
1614 buf += len;
1615 cnt -= len;
Seungwon Jeonc09fbd72013-03-25 16:28:22 +09001616
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001617 if (host->part_buf_count == 8) {
Seungwon Jeonc09fbd72013-03-25 16:28:22 +09001618 mci_writeq(host, DATA(host->data_offset),
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001619 host->part_buf);
James Hogan34b664a2011-06-24 13:57:56 +01001620 host->part_buf_count = 0;
1621 }
1622 }
1623#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1624 if (unlikely((unsigned long)buf & 0x7)) {
1625 while (cnt >= 8) {
1626 u64 aligned_buf[16];
1627 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1628 int items = len >> 3;
1629 int i;
1630 /* memcpy from input buffer into aligned buffer */
1631 memcpy(aligned_buf, buf, len);
1632 buf += len;
1633 cnt -= len;
1634 /* push data from aligned buffer into fifo */
1635 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001636 mci_writeq(host, DATA(host->data_offset),
1637 aligned_buf[i]);
James Hogan34b664a2011-06-24 13:57:56 +01001638 }
1639 } else
1640#endif
1641 {
1642 u64 *pdata = buf;
1643 for (; cnt >= 8; cnt -= 8)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001644 mci_writeq(host, DATA(host->data_offset), *pdata++);
James Hogan34b664a2011-06-24 13:57:56 +01001645 buf = pdata;
1646 }
1647 /* put anything remaining in the part_buf */
1648 if (cnt) {
1649 dw_mci_set_part_bytes(host, buf, cnt);
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001650 /* Push data if we have reached the expected data length */
1651 if ((data->bytes_xfered + init_cnt) ==
1652 (data->blksz * data->blocks))
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001653 mci_writeq(host, DATA(host->data_offset),
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001654 host->part_buf);
Will Newtonf95f3852011-01-02 01:11:59 -05001655 }
1656}
1657
1658static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1659{
James Hogan34b664a2011-06-24 13:57:56 +01001660#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1661 if (unlikely((unsigned long)buf & 0x7)) {
1662 while (cnt >= 8) {
1663 /* pull data from fifo into aligned buffer */
1664 u64 aligned_buf[16];
1665 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1666 int items = len >> 3;
1667 int i;
1668 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001669 aligned_buf[i] = mci_readq(host,
1670 DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001671 /* memcpy from aligned buffer into output buffer */
1672 memcpy(buf, aligned_buf, len);
1673 buf += len;
1674 cnt -= len;
1675 }
1676 } else
1677#endif
1678 {
1679 u64 *pdata = buf;
1680 for (; cnt >= 8; cnt -= 8)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001681 *pdata++ = mci_readq(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001682 buf = pdata;
Will Newtonf95f3852011-01-02 01:11:59 -05001683 }
James Hogan34b664a2011-06-24 13:57:56 +01001684 if (cnt) {
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001685 host->part_buf = mci_readq(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001686 dw_mci_pull_final_bytes(host, buf, cnt);
1687 }
1688}
1689
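/*
 * Pull cnt bytes out of the data FIFO into buf: first drain any bytes left
 * over in part_buf, then hand the remainder to the width-specific pull routine.
 */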
1690static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1691{
1692 int len;
1693
1694 /* get remaining partial bytes */
1695 len = dw_mci_pull_part_bytes(host, buf, cnt);
1696 if (unlikely(len == cnt))
1697 return;
1698 buf += len;
1699 cnt -= len;
1700
1701 /* get the rest of the data */
1702 host->pull_data(host, buf, cnt);
Will Newtonf95f3852011-01-02 01:11:59 -05001703}
1704
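/*
 * PIO read path: walk the request's scatterlist with sg_miter and drain the
 * FIFO until RXDR stops asserting (or, on data-over, until the FIFO is empty).
 */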
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001705static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
Will Newtonf95f3852011-01-02 01:11:59 -05001706{
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001707 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1708 void *buf;
1709 unsigned int offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001710 struct mmc_data *data = host->data;
1711 int shift = host->data_shift;
1712 u32 status;
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001713 unsigned int len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001714 unsigned int remain, fcnt;
Will Newtonf95f3852011-01-02 01:11:59 -05001715
1716 do {
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001717 if (!sg_miter_next(sg_miter))
1718 goto done;
Will Newtonf95f3852011-01-02 01:11:59 -05001719
Imre Deak4225fc82013-02-27 17:02:57 -08001720 host->sg = sg_miter->piter.sg;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001721 buf = sg_miter->addr;
1722 remain = sg_miter->length;
1723 offset = 0;
1724
1725 do {
1726 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1727 << shift) + host->part_buf_count;
1728 len = min(remain, fcnt);
1729 if (!len)
1730 break;
1731 dw_mci_pull_data(host, (void *)(buf + offset), len);
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001732 data->bytes_xfered += len;
Will Newtonf95f3852011-01-02 01:11:59 -05001733 offset += len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001734 remain -= len;
1735 } while (remain);
Will Newtonf95f3852011-01-02 01:11:59 -05001736
Seungwon Jeone74f3a92012-08-01 09:30:46 +09001737 sg_miter->consumed = offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001738 status = mci_readl(host, MINTSTS);
1739 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001740 /* if the RXDR is ready, read again */
1741 } while ((status & SDMMC_INT_RXDR) ||
1742 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001743
1744 if (!remain) {
1745 if (!sg_miter_next(sg_miter))
1746 goto done;
1747 sg_miter->consumed = 0;
1748 }
1749 sg_miter_stop(sg_miter);
Will Newtonf95f3852011-01-02 01:11:59 -05001750 return;
1751
1752done:
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001753 sg_miter_stop(sg_miter);
1754 host->sg = NULL;
Will Newtonf95f3852011-01-02 01:11:59 -05001755 smp_wmb();
1756 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1757}
1758
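/*
 * PIO write path: walk the scatterlist with sg_miter and top up the FIFO
 * while the TXDR interrupt keeps asserting.
 */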
1759static void dw_mci_write_data_pio(struct dw_mci *host)
1760{
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001761 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1762 void *buf;
1763 unsigned int offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001764 struct mmc_data *data = host->data;
1765 int shift = host->data_shift;
1766 u32 status;
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001767 unsigned int len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001768 unsigned int fifo_depth = host->fifo_depth;
1769 unsigned int remain, fcnt;
Will Newtonf95f3852011-01-02 01:11:59 -05001770
1771 do {
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001772 if (!sg_miter_next(sg_miter))
1773 goto done;
Will Newtonf95f3852011-01-02 01:11:59 -05001774
Imre Deak4225fc82013-02-27 17:02:57 -08001775 host->sg = sg_miter->piter.sg;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001776 buf = sg_miter->addr;
1777 remain = sg_miter->length;
1778 offset = 0;
1779
1780 do {
1781 fcnt = ((fifo_depth -
1782 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1783 << shift) - host->part_buf_count;
1784 len = min(remain, fcnt);
1785 if (!len)
1786 break;
1787 host->push_data(host, (void *)(buf + offset), len);
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001788 data->bytes_xfered += len;
Will Newtonf95f3852011-01-02 01:11:59 -05001789 offset += len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001790 remain -= len;
1791 } while (remain);
Will Newtonf95f3852011-01-02 01:11:59 -05001792
Seungwon Jeone74f3a92012-08-01 09:30:46 +09001793 sg_miter->consumed = offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001794 status = mci_readl(host, MINTSTS);
1795 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
Will Newtonf95f3852011-01-02 01:11:59 -05001796 } while (status & SDMMC_INT_TXDR); /* if TXDR, write again */
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001797
1798 if (!remain) {
1799 if (!sg_miter_next(sg_miter))
1800 goto done;
1801 sg_miter->consumed = 0;
1802 }
1803 sg_miter_stop(sg_miter);
Will Newtonf95f3852011-01-02 01:11:59 -05001804 return;
1805
1806done:
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001807 sg_miter_stop(sg_miter);
1808 host->sg = NULL;
Will Newtonf95f3852011-01-02 01:11:59 -05001809 smp_wmb();
1810 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1811}
1812
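/* Record command status and let the tasklet finish command processing */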
1813static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1814{
1815 if (!host->cmd_status)
1816 host->cmd_status = status;
1817
1818 smp_wmb();
1819
1820 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1821 tasklet_schedule(&host->tasklet);
1822}
1823
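/*
 * Top-half interrupt handler: acknowledge pending MINTSTS bits, record
 * command/data status for the tasklet, service PIO transfers and SDIO
 * interrupts inline, and handle IDMAC completion when internal DMA is used.
 */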
1824static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1825{
1826 struct dw_mci *host = dev_id;
Seungwon Jeon182c9082012-08-01 09:30:30 +09001827 u32 pending;
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301828 int i;
Will Newtonf95f3852011-01-02 01:11:59 -05001829
Markos Chandras1fb5f682013-03-12 10:53:11 +00001830 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1831
Doug Anderson476d79f2013-07-09 13:04:40 -07001832 /*
1833 * DTO fix - version 2.10a and below, and only if internal DMA
1834 * is configured.
1835 */
1836 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1837 if (!pending &&
1838 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1839 pending |= SDMMC_INT_DATA_OVER;
1840 }
1841
Markos Chandras1fb5f682013-03-12 10:53:11 +00001842 if (pending) {
Will Newtonf95f3852011-01-02 01:11:59 -05001843 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1844 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001845 host->cmd_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001846 smp_wmb();
1847 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
Will Newtonf95f3852011-01-02 01:11:59 -05001848 }
1849
1850 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1851 /* if there is an error, report DATA_ERROR */
1852 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001853 host->data_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001854 smp_wmb();
1855 set_bit(EVENT_DATA_ERROR, &host->pending_events);
Seungwon Jeon9b2026a2012-08-01 09:30:40 +09001856 tasklet_schedule(&host->tasklet);
Will Newtonf95f3852011-01-02 01:11:59 -05001857 }
1858
1859 if (pending & SDMMC_INT_DATA_OVER) {
1860 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1861 if (!host->data_status)
Seungwon Jeon182c9082012-08-01 09:30:30 +09001862 host->data_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001863 smp_wmb();
1864 if (host->dir_status == DW_MCI_RECV_STATUS) {
1865 if (host->sg != NULL)
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001866 dw_mci_read_data_pio(host, true);
Will Newtonf95f3852011-01-02 01:11:59 -05001867 }
1868 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1869 tasklet_schedule(&host->tasklet);
1870 }
1871
1872 if (pending & SDMMC_INT_RXDR) {
1873 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
James Hoganb40af3a2011-06-24 13:54:06 +01001874 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001875 dw_mci_read_data_pio(host, false);
Will Newtonf95f3852011-01-02 01:11:59 -05001876 }
1877
1878 if (pending & SDMMC_INT_TXDR) {
1879 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
James Hoganb40af3a2011-06-24 13:54:06 +01001880 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
Will Newtonf95f3852011-01-02 01:11:59 -05001881 dw_mci_write_data_pio(host);
1882 }
1883
1884 if (pending & SDMMC_INT_CMD_DONE) {
1885 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001886 dw_mci_cmd_interrupt(host, pending);
Will Newtonf95f3852011-01-02 01:11:59 -05001887 }
1888
1889 if (pending & SDMMC_INT_CD) {
1890 mci_writel(host, RINTSTS, SDMMC_INT_CD);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07001891 queue_work(host->card_workqueue, &host->card_work);
Will Newtonf95f3852011-01-02 01:11:59 -05001892 }
1893
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301894 /* Handle SDIO Interrupts */
1895 for (i = 0; i < host->num_slots; i++) {
1896 struct dw_mci_slot *slot = host->slot[i];
1897 if (pending & SDMMC_INT_SDIO(i)) {
1898 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1899 mmc_signal_sdio_irq(slot->mmc);
1900 }
1901 }
1902
Markos Chandras1fb5f682013-03-12 10:53:11 +00001903 }
Will Newtonf95f3852011-01-02 01:11:59 -05001904
1905#ifdef CONFIG_MMC_DW_IDMAC
1906 /* Handle DMA interrupts */
1907 pending = mci_readl(host, IDSTS);
1908 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1909 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1910 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
Will Newtonf95f3852011-01-02 01:11:59 -05001911 host->dma_ops->complete(host);
1912 }
1913#endif
1914
1915 return IRQ_HANDLED;
1916}
1917
James Hogan1791b13e2011-06-24 13:55:55 +01001918static void dw_mci_work_routine_card(struct work_struct *work)
Will Newtonf95f3852011-01-02 01:11:59 -05001919{
James Hogan1791b13e2011-06-24 13:55:55 +01001920 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
Will Newtonf95f3852011-01-02 01:11:59 -05001921 int i;
1922
1923 for (i = 0; i < host->num_slots; i++) {
1924 struct dw_mci_slot *slot = host->slot[i];
1925 struct mmc_host *mmc = slot->mmc;
1926 struct mmc_request *mrq;
1927 int present;
Will Newtonf95f3852011-01-02 01:11:59 -05001928
1929 present = dw_mci_get_cd(mmc);
1930 while (present != slot->last_detect_state) {
Will Newtonf95f3852011-01-02 01:11:59 -05001931 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1932 present ? "inserted" : "removed");
1933
James Hogan1791b13e2011-06-24 13:55:55 +01001934 spin_lock_bh(&host->lock);
1935
Will Newtonf95f3852011-01-02 01:11:59 -05001936 /* Card change detected */
1937 slot->last_detect_state = present;
1938
Will Newtonf95f3852011-01-02 01:11:59 -05001939 /* Clean up queue if present */
1940 mrq = slot->mrq;
1941 if (mrq) {
1942 if (mrq == host->mrq) {
1943 host->data = NULL;
1944 host->cmd = NULL;
1945
1946 switch (host->state) {
1947 case STATE_IDLE:
1948 break;
1949 case STATE_SENDING_CMD:
1950 mrq->cmd->error = -ENOMEDIUM;
1951 if (!mrq->data)
1952 break;
1953 /* fall through */
1954 case STATE_SENDING_DATA:
1955 mrq->data->error = -ENOMEDIUM;
1956 dw_mci_stop_dma(host);
1957 break;
1958 case STATE_DATA_BUSY:
1959 case STATE_DATA_ERROR:
1960 if (mrq->data->error == -EINPROGRESS)
1961 mrq->data->error = -ENOMEDIUM;
Will Newtonf95f3852011-01-02 01:11:59 -05001962 /* fall through */
1963 case STATE_SENDING_STOP:
Seungwon Jeon90c21432013-08-31 00:14:05 +09001964 if (mrq->stop)
1965 mrq->stop->error = -ENOMEDIUM;
Will Newtonf95f3852011-01-02 01:11:59 -05001966 break;
1967 }
1968
1969 dw_mci_request_end(host, mrq);
1970 } else {
1971 list_del(&slot->queue_node);
1972 mrq->cmd->error = -ENOMEDIUM;
1973 if (mrq->data)
1974 mrq->data->error = -ENOMEDIUM;
1975 if (mrq->stop)
1976 mrq->stop->error = -ENOMEDIUM;
1977
1978 spin_unlock(&host->lock);
1979 mmc_request_done(slot->mmc, mrq);
1980 spin_lock(&host->lock);
1981 }
1982 }
1983
1984 /* Power down slot */
1985 if (present == 0) {
Seungwon Jeon31bff452013-08-31 00:14:23 +09001986 /* Clear down the FIFO */
1987 dw_mci_fifo_reset(host);
Will Newtonf95f3852011-01-02 01:11:59 -05001988#ifdef CONFIG_MMC_DW_IDMAC
Seungwon Jeon5ce9d962013-08-31 00:14:33 +09001989 dw_mci_idmac_reset(host);
Will Newtonf95f3852011-01-02 01:11:59 -05001990#endif
1991
1992 }
1993
James Hogan1791b13e2011-06-24 13:55:55 +01001994 spin_unlock_bh(&host->lock);
1995
Will Newtonf95f3852011-01-02 01:11:59 -05001996 present = dw_mci_get_cd(mmc);
1997 }
1998
1999 mmc_detect_change(slot->mmc,
2000 msecs_to_jiffies(host->pdata->detect_delay_ms));
2001 }
2002}
2003
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002004#ifdef CONFIG_OF
2005/* given a slot id, find out the device node representing that slot */
2006static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2007{
2008 struct device_node *np;
2009 const __be32 *addr;
2010 int len;
2011
2012 if (!dev || !dev->of_node)
2013 return NULL;
2014
2015 for_each_child_of_node(dev->of_node, np) {
2016 addr = of_get_property(np, "reg", &len);
2017 if (!addr || (len < sizeof(int)))
2018 continue;
2019 if (be32_to_cpup(addr) == slot)
2020 return np;
2021 }
2022 return NULL;
2023}
2024
Doug Andersona70aaa62013-01-11 17:03:50 +00002025static struct dw_mci_of_slot_quirks {
2026 char *quirk;
2027 int id;
2028} of_slot_quirks[] = {
2029 {
2030 .quirk = "disable-wp",
2031 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2032 },
2033};
2034
2035static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2036{
2037 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2038 int quirks = 0;
2039 int idx;
2040
2041 /* get quirks */
2042 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2043 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2044 quirks |= of_slot_quirks[idx].id;
2045
2046 return quirks;
2047}
2048
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002049/* find out bus-width for a given slot */
2050static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2051{
2052 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2053 u32 bus_wd = 1;
2054
2055 if (!np)
2056 return 1;
2057
2058 if (of_property_read_u32(np, "bus-width", &bus_wd))
2059 dev_err(dev, "bus-width property not found, assuming width"
2060 " as 1\n");
2061 return bus_wd;
2062}
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002063
2064/* find the write-protect gpio for a given slot; returns -EINVAL if none specified */
2065static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2066{
2067 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2068 int gpio;
2069
2070 if (!np)
2071 return -EINVAL;
2072
2073 gpio = of_get_named_gpio(np, "wp-gpios", 0);
2074
2075 /* Having a missing entry is valid; return silently */
2076 if (!gpio_is_valid(gpio))
2077 return -EINVAL;
2078
2079 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2080 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2081 return -EINVAL;
2082 }
2083
2084 return gpio;
2085}
Zhangfei Gaobf626e52014-01-09 22:35:10 +08002086
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08002087/* find the cd gpio for a given slot */
Zhangfei Gaobf626e52014-01-09 22:35:10 +08002088static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2089 struct mmc_host *mmc)
2090{
2091 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2092 int gpio;
2093
2094 if (!np)
2095 return;
2096
2097 gpio = of_get_named_gpio(np, "cd-gpios", 0);
2098
2099 /* Having a missing entry is valid; return silently */
2100 if (!gpio_is_valid(gpio))
2101 return;
2102
2103 if (mmc_gpio_request_cd(mmc, gpio, 0))
2104 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2105}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002106#else /* CONFIG_OF */
Doug Andersona70aaa62013-01-11 17:03:50 +00002107static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2108{
2109 return 0;
2110}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002111static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2112{
2113 return 1;
2114}
2115static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2116{
2117 return NULL;
2118}
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002119static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2120{
2121 return -EINVAL;
2122}
Zhangfei Gaobf626e52014-01-09 22:35:10 +08002123static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2124 struct mmc_host *mmc)
2125{
2126 return;
2127}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002128#endif /* CONFIG_OF */
2129
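/* Allocate and register an mmc_host for one slot of the controller */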
Jaehoon Chung36c179a2012-08-23 20:31:48 +09002130static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
Will Newtonf95f3852011-01-02 01:11:59 -05002131{
2132 struct mmc_host *mmc;
2133 struct dw_mci_slot *slot;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002134 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002135 int ctrl_id, ret;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002136 u32 freq[2];
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002137 u8 bus_width;
Will Newtonf95f3852011-01-02 01:11:59 -05002138
Thomas Abraham4a909202012-09-17 18:16:35 +00002139 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
Will Newtonf95f3852011-01-02 01:11:59 -05002140 if (!mmc)
2141 return -ENOMEM;
2142
2143 slot = mmc_priv(mmc);
2144 slot->id = id;
2145 slot->mmc = mmc;
2146 slot->host = host;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002147 host->slot[id] = slot;
Will Newtonf95f3852011-01-02 01:11:59 -05002148
Doug Andersona70aaa62013-01-11 17:03:50 +00002149 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2150
Will Newtonf95f3852011-01-02 01:11:59 -05002151 mmc->ops = &dw_mci_ops;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002152 if (of_property_read_u32_array(host->dev->of_node,
2153 "clock-freq-min-max", freq, 2)) {
2154 mmc->f_min = DW_MCI_FREQ_MIN;
2155 mmc->f_max = DW_MCI_FREQ_MAX;
2156 } else {
2157 mmc->f_min = freq[0];
2158 mmc->f_max = freq[1];
2159 }
Will Newtonf95f3852011-01-02 01:11:59 -05002160
2161 if (host->pdata->get_ocr)
2162 mmc->ocr_avail = host->pdata->get_ocr(id);
2163 else
2164 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2165
2166 /*
2167 * Start with slot power disabled; it will be enabled when a card
2168 * is detected.
2169 */
2170 if (host->pdata->setpower)
2171 host->pdata->setpower(id, 0);
2172
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002173 if (host->pdata->caps)
2174 mmc->caps = host->pdata->caps;
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002175
Abhilash Kesavanab269122012-11-19 10:26:21 +05302176 if (host->pdata->pm_caps)
2177 mmc->pm_caps = host->pdata->pm_caps;
2178
Thomas Abraham800d78b2012-09-17 18:16:42 +00002179 if (host->dev->of_node) {
2180 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2181 if (ctrl_id < 0)
2182 ctrl_id = 0;
2183 } else {
2184 ctrl_id = to_platform_device(host->dev)->id;
2185 }
James Hogancb27a842012-10-16 09:43:08 +01002186 if (drv_data && drv_data->caps)
2187 mmc->caps |= drv_data->caps[ctrl_id];
Thomas Abraham800d78b2012-09-17 18:16:42 +00002188
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002189 if (host->pdata->caps2)
2190 mmc->caps2 = host->pdata->caps2;
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002191
Will Newtonf95f3852011-01-02 01:11:59 -05002192 if (host->pdata->get_bus_wd)
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002193 bus_width = host->pdata->get_bus_wd(slot->id);
2194 else if (host->dev->of_node)
2195 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2196 else
2197 bus_width = 1;
2198
2199 switch (bus_width) {
2200 case 8:
2201 mmc->caps |= MMC_CAP_8_BIT_DATA;
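		/* fall through - 8-bit data also enables 4-bit data */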
2202 case 4:
2203 mmc->caps |= MMC_CAP_4_BIT_DATA;
2204 }
Will Newtonf95f3852011-01-02 01:11:59 -05002205
Will Newtonf95f3852011-01-02 01:11:59 -05002206 if (host->pdata->blk_settings) {
2207 mmc->max_segs = host->pdata->blk_settings->max_segs;
2208 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2209 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2210 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2211 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2212 } else {
2213 /* Useful defaults if platform data is unset. */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002214#ifdef CONFIG_MMC_DW_IDMAC
2215 mmc->max_segs = host->ring_size;
2216 mmc->max_blk_size = 65536;
2217 mmc->max_blk_count = host->ring_size;
2218 mmc->max_seg_size = 0x1000;
2219 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2220#else
Will Newtonf95f3852011-01-02 01:11:59 -05002221 mmc->max_segs = 64;
2222 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2223 mmc->max_blk_count = 512;
2224 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2225 mmc->max_seg_size = mmc->max_req_size;
Will Newtonf95f3852011-01-02 01:11:59 -05002226#endif /* CONFIG_MMC_DW_IDMAC */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002227 }
Will Newtonf95f3852011-01-02 01:11:59 -05002228
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002229 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
Zhangfei Gaobf626e52014-01-09 22:35:10 +08002230 dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002231
Jaehoon Chung0cea5292013-02-15 23:45:45 +09002232 ret = mmc_add_host(mmc);
2233 if (ret)
2234 goto err_setup_bus;
Will Newtonf95f3852011-01-02 01:11:59 -05002235
2236#if defined(CONFIG_DEBUG_FS)
2237 dw_mci_init_debugfs(slot);
2238#endif
2239
2240 /* Card initially undetected */
2241 slot->last_detect_state = 0;
2242
Will Newtonf95f3852011-01-02 01:11:59 -05002243 return 0;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002244
2245err_setup_bus:
2246 mmc_free_host(mmc);
2247 return -EINVAL;
Will Newtonf95f3852011-01-02 01:11:59 -05002248}
2249
2250static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2251{
2252 /* Shutdown detect IRQ */
2253 if (slot->host->pdata->exit)
2254 slot->host->pdata->exit(id);
2255
2256 /* Debugfs stuff is cleaned up by mmc core */
2257 mmc_remove_host(slot->mmc);
2258 slot->host->slot[id] = NULL;
2259 mmc_free_host(slot->mmc);
2260}
2261
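/* Pick a DMA backend (IDMAC when configured) or fall back to PIO mode */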
2262static void dw_mci_init_dma(struct dw_mci *host)
2263{
2264 /* Alloc memory for sg translation */
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002265 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
Will Newtonf95f3852011-01-02 01:11:59 -05002266 &host->sg_dma, GFP_KERNEL);
2267 if (!host->sg_cpu) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002268 dev_err(host->dev, "%s: could not alloc DMA memory\n",
Will Newtonf95f3852011-01-02 01:11:59 -05002269 __func__);
2270 goto no_dma;
2271 }
2272
2273 /* Determine which DMA interface to use */
2274#ifdef CONFIG_MMC_DW_IDMAC
2275 host->dma_ops = &dw_mci_idmac_ops;
Seungwon Jeon00956ea2012-09-28 19:13:11 +09002276 dev_info(host->dev, "Using internal DMA controller.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002277#endif
2278
2279 if (!host->dma_ops)
2280 goto no_dma;
2281
Jaehoon Chunge1631f92012-04-18 15:42:31 +09002282 if (host->dma_ops->init && host->dma_ops->start &&
2283 host->dma_ops->stop && host->dma_ops->cleanup) {
Will Newtonf95f3852011-01-02 01:11:59 -05002284 if (host->dma_ops->init(host)) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002285 dev_err(host->dev, "%s: Unable to initialize "
Will Newtonf95f3852011-01-02 01:11:59 -05002286 "DMA Controller.\n", __func__);
2287 goto no_dma;
2288 }
2289 } else {
Thomas Abraham4a909202012-09-17 18:16:35 +00002290 dev_err(host->dev, "DMA initialization not found.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002291 goto no_dma;
2292 }
2293
2294 host->use_dma = 1;
2295 return;
2296
2297no_dma:
Thomas Abraham4a909202012-09-17 18:16:35 +00002298 dev_info(host->dev, "Using PIO mode.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002299 host->use_dma = 0;
2300 return;
2301}
2302
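/* Assert the requested reset bits in CTRL and wait for them to self-clear */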
Seungwon Jeon31bff452013-08-31 00:14:23 +09002303static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
Will Newtonf95f3852011-01-02 01:11:59 -05002304{
2305 unsigned long timeout = jiffies + msecs_to_jiffies(500);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002306 u32 ctrl;
Will Newtonf95f3852011-01-02 01:11:59 -05002307
Seungwon Jeon31bff452013-08-31 00:14:23 +09002308 ctrl = mci_readl(host, CTRL);
2309 ctrl |= reset;
2310 mci_writel(host, CTRL, ctrl);
Will Newtonf95f3852011-01-02 01:11:59 -05002311
2312 /* wait till resets clear */
2313 do {
2314 ctrl = mci_readl(host, CTRL);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002315 if (!(ctrl & reset))
Will Newtonf95f3852011-01-02 01:11:59 -05002316 return true;
2317 } while (time_before(jiffies, timeout));
2318
Seungwon Jeon31bff452013-08-31 00:14:23 +09002319 dev_err(host->dev,
2320 "Timeout resetting block (ctrl reset %#x)\n",
2321 ctrl & reset);
Will Newtonf95f3852011-01-02 01:11:59 -05002322
2323 return false;
2324}
2325
Seungwon Jeon31bff452013-08-31 00:14:23 +09002326static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2327{
2328 /*
2329 * Resetting generates a block interrupt, hence setting
2330 * the scatter-gather pointer to NULL.
2331 */
2332 if (host->sg) {
2333 sg_miter_stop(&host->sg_miter);
2334 host->sg = NULL;
2335 }
2336
2337 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2338}
2339
2340static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2341{
2342 return dw_mci_ctrl_reset(host,
2343 SDMMC_CTRL_FIFO_RESET |
2344 SDMMC_CTRL_RESET |
2345 SDMMC_CTRL_DMA_RESET);
2346}
2347
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002348#ifdef CONFIG_OF
2349static struct dw_mci_of_quirks {
2350 char *quirk;
2351 int id;
2352} of_quirks[] = {
2353 {
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002354 .quirk = "broken-cd",
2355 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2356 },
2357};
2358
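/* Build a dw_mci_board from device-tree properties when no platform data is given */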
2359static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2360{
2361 struct dw_mci_board *pdata;
2362 struct device *dev = host->dev;
2363 struct device_node *np = dev->of_node;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002364 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002365 int idx, ret;
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002366 u32 clock_frequency;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002367
2368 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2369 if (!pdata) {
2370 dev_err(dev, "could not allocate memory for pdata\n");
2371 return ERR_PTR(-ENOMEM);
2372 }
2373
2374 /* find out number of slots supported */
2375 if (of_property_read_u32(dev->of_node, "num-slots",
2376 &pdata->num_slots)) {
2377 dev_info(dev, "num-slots property not found, "
2378 "assuming 1 slot is available\n");
2379 pdata->num_slots = 1;
2380 }
2381
2382 /* get quirks */
2383 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2384 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2385 pdata->quirks |= of_quirks[idx].id;
2386
2387 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2388 dev_info(dev, "fifo-depth property not found, using "
2389 "value of FIFOTH register as default\n");
2390
2391 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2392
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002393 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2394 pdata->bus_hz = clock_frequency;
2395
James Hogancb27a842012-10-16 09:43:08 +01002396 if (drv_data && drv_data->parse_dt) {
2397 ret = drv_data->parse_dt(host);
Thomas Abraham800d78b2012-09-17 18:16:42 +00002398 if (ret)
2399 return ERR_PTR(ret);
2400 }
2401
Abhilash Kesavanab269122012-11-19 10:26:21 +05302402 if (of_find_property(np, "keep-power-in-suspend", NULL))
2403 pdata->pm_caps |= MMC_PM_KEEP_POWER;
2404
2405 if (of_find_property(np, "enable-sdio-wakeup", NULL))
2406 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2407
Seungwon Jeon10b49842013-08-31 00:13:22 +09002408 if (of_find_property(np, "supports-highspeed", NULL))
2409 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2410
Seungwon Jeon5dd63f52013-08-31 00:13:09 +09002411 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2412 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2413
2414 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2415 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2416
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08002417 if (of_get_property(np, "cd-inverted", NULL))
2418 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
2419
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002420 return pdata;
2421}
2422
2423#else /* CONFIG_OF */
2424static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2425{
2426 return ERR_PTR(-EINVAL);
2427}
2428#endif /* CONFIG_OF */
2429
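/*
 * Common probe: parse platform data or DT, enable clocks and the optional
 * vmmc regulator, reset the controller, set up DMA, FIFO and interrupts,
 * and register each slot.
 */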
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302430int dw_mci_probe(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002431{
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002432 const struct dw_mci_drv_data *drv_data = host->drv_data;
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302433 int width, i, ret = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002434 u32 fifo_size;
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002435 int init_slots = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002436
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002437 if (!host->pdata) {
2438 host->pdata = dw_mci_parse_dt(host);
2439 if (IS_ERR(host->pdata)) {
2440 dev_err(host->dev, "platform data not available\n");
2441 return -EINVAL;
2442 }
Will Newtonf95f3852011-01-02 01:11:59 -05002443 }
2444
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302445 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002446 dev_err(host->dev,
Will Newtonf95f3852011-01-02 01:11:59 -05002447 "Platform data must supply select_slot function\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302448 return -ENODEV;
Will Newtonf95f3852011-01-02 01:11:59 -05002449 }
2450
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002451 host->biu_clk = devm_clk_get(host->dev, "biu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002452 if (IS_ERR(host->biu_clk)) {
2453 dev_dbg(host->dev, "biu clock not available\n");
2454 } else {
2455 ret = clk_prepare_enable(host->biu_clk);
2456 if (ret) {
2457 dev_err(host->dev, "failed to enable biu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002458 return ret;
2459 }
Will Newtonf95f3852011-01-02 01:11:59 -05002460 }
2461
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002462 host->ciu_clk = devm_clk_get(host->dev, "ciu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002463 if (IS_ERR(host->ciu_clk)) {
2464 dev_dbg(host->dev, "ciu clock not available\n");
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002465 host->bus_hz = host->pdata->bus_hz;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002466 } else {
2467 ret = clk_prepare_enable(host->ciu_clk);
2468 if (ret) {
2469 dev_err(host->dev, "failed to enable ciu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002470 goto err_clk_biu;
2471 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002472
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002473 if (host->pdata->bus_hz) {
2474 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2475 if (ret)
2476 dev_warn(host->dev,
2477 "Unable to set bus rate to %ul\n",
2478 host->pdata->bus_hz);
2479 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002480 host->bus_hz = clk_get_rate(host->ciu_clk);
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002481 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002482
Yuvaraj Kumar C D002f0d52013-08-31 00:12:19 +09002483 if (drv_data && drv_data->init) {
2484 ret = drv_data->init(host);
2485 if (ret) {
2486 dev_err(host->dev,
2487 "implementation specific init failed\n");
2488 goto err_clk_ciu;
2489 }
2490 }
2491
James Hogancb27a842012-10-16 09:43:08 +01002492 if (drv_data && drv_data->setup_clock) {
2493 ret = drv_data->setup_clock(host);
Thomas Abraham800d78b2012-09-17 18:16:42 +00002494 if (ret) {
2495 dev_err(host->dev,
2496 "implementation specific clock setup failed\n");
2497 goto err_clk_ciu;
2498 }
2499 }
2500
Mark Browna55d6ff2013-07-29 21:55:27 +01002501 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
Doug Anderson870556a2013-06-07 10:28:29 -07002502 if (IS_ERR(host->vmmc)) {
2503 ret = PTR_ERR(host->vmmc);
2504 if (ret == -EPROBE_DEFER)
2505 goto err_clk_ciu;
2506
2507 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2508 host->vmmc = NULL;
2509 } else {
2510 ret = regulator_enable(host->vmmc);
2511 if (ret) {
2512 if (ret != -EPROBE_DEFER)
2513 dev_err(host->dev,
2514 "regulator_enable fail: %d\n", ret);
2515 goto err_clk_ciu;
2516 }
2517 }
2518
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002519 if (!host->bus_hz) {
2520 dev_err(host->dev,
2521 "Platform data must supply bus speed\n");
2522 ret = -ENODEV;
Doug Anderson870556a2013-06-07 10:28:29 -07002523 goto err_regulator;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002524 }
2525
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302526 host->quirks = host->pdata->quirks;
Will Newtonf95f3852011-01-02 01:11:59 -05002527
2528 spin_lock_init(&host->lock);
2529 INIT_LIST_HEAD(&host->queue);
2530
Will Newtonf95f3852011-01-02 01:11:59 -05002531 /*
2532 * Get the host data width - this assumes that HCON has been set with
2533 * the correct values.
2534 */
2535 i = (mci_readl(host, HCON) >> 7) & 0x7;
2536 if (!i) {
2537 host->push_data = dw_mci_push_data16;
2538 host->pull_data = dw_mci_pull_data16;
2539 width = 16;
2540 host->data_shift = 1;
2541 } else if (i == 2) {
2542 host->push_data = dw_mci_push_data64;
2543 host->pull_data = dw_mci_pull_data64;
2544 width = 64;
2545 host->data_shift = 3;
2546 } else {
2547 /* Check for a reserved value, and warn if it is */
2548 WARN((i != 1),
2549 "HCON reports a reserved host data width!\n"
2550 "Defaulting to 32-bit access.\n");
2551 host->push_data = dw_mci_push_data32;
2552 host->pull_data = dw_mci_pull_data32;
2553 width = 32;
2554 host->data_shift = 2;
2555 }
2556
2557 /* Reset all blocks */
Seungwon Jeon31bff452013-08-31 00:14:23 +09002558 if (!dw_mci_ctrl_all_reset(host))
Seungwon Jeon141a7122012-05-22 13:01:03 +09002559 return -ENODEV;
2560
2561 host->dma_ops = host->pdata->dma_ops;
2562 dw_mci_init_dma(host);
Will Newtonf95f3852011-01-02 01:11:59 -05002563
2564 /* Clear the interrupts for the host controller */
2565 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2566 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2567
2568 /* Put in max timeout */
2569 mci_writel(host, TMOUT, 0xFFFFFFFF);
2570
2571 /*
2572 * FIFO threshold settings: RX mark = fifo_size / 2 - 1,
2573 * TX mark = fifo_size / 2, DMA size = 8
2574 */
James Hoganb86d8252011-06-24 13:57:18 +01002575 if (!host->pdata->fifo_depth) {
2576 /*
2577 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2578 * have been overwritten by the bootloader, just like we're
2579 * about to do, so if you know the value for your hardware, you
2580 * should put it in the platform data.
2581 */
2582 fifo_size = mci_readl(host, FIFOTH);
Jaehoon Chung8234e862012-01-11 09:28:21 +00002583 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
James Hoganb86d8252011-06-24 13:57:18 +01002584 } else {
2585 fifo_size = host->pdata->fifo_depth;
2586 }
2587 host->fifo_depth = fifo_size;
Seungwon Jeon52426892013-08-31 00:13:42 +09002588 host->fifoth_val =
2589 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002590 mci_writel(host, FIFOTH, host->fifoth_val);
Will Newtonf95f3852011-01-02 01:11:59 -05002591
2592 /* disable clock to CIU */
2593 mci_writel(host, CLKENA, 0);
2594 mci_writel(host, CLKSRC, 0);
2595
James Hogan63008762013-03-12 10:43:54 +00002596 /*
2597 * In 2.40a spec, Data offset is changed.
2598 * Need to check the version-id and set data-offset for DATA register.
2599 */
2600 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2601 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2602
2603 if (host->verid < DW_MMC_240A)
2604 host->data_offset = DATA_OFFSET;
2605 else
2606 host->data_offset = DATA_240A_OFFSET;
2607
Will Newtonf95f3852011-01-02 01:11:59 -05002608 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002609 host->card_workqueue = alloc_workqueue("dw-mci-card",
James Hogan1791b13e2011-06-24 13:55:55 +01002610 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
Wei Yongjunef7aef92013-04-19 09:25:45 +08002611 if (!host->card_workqueue) {
2612 ret = -ENOMEM;
James Hogan1791b13e2011-06-24 13:55:55 +01002613 goto err_dmaunmap;
Wei Yongjunef7aef92013-04-19 09:25:45 +08002614 }
James Hogan1791b13e2011-06-24 13:55:55 +01002615 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002616 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2617 host->irq_flags, "dw-mci", host);
Will Newtonf95f3852011-01-02 01:11:59 -05002618 if (ret)
James Hogan1791b13e2011-06-24 13:55:55 +01002619 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002620
Will Newtonf95f3852011-01-02 01:11:59 -05002621 if (host->pdata->num_slots)
2622 host->num_slots = host->pdata->num_slots;
2623 else
2624 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2625
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05302626 /*
2627 * Enable interrupts for command done, data over, data empty, card detect,
2628 * receive ready, and errors such as transmit/receive timeout and CRC error
2629 */
2630 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2631 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2632 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2633 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2634 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2635
2636 dev_info(host->dev, "DW MMC controller at irq %d, "
2637 "%d bit host data width, "
2638 "%u deep fifo\n",
2639 host->irq, width, fifo_size);
2640
Will Newtonf95f3852011-01-02 01:11:59 -05002641 /* We need at least one slot to succeed */
2642 for (i = 0; i < host->num_slots; i++) {
2643 ret = dw_mci_init_slot(host, i);
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002644 if (ret)
2645 dev_dbg(host->dev, "slot %d init failed\n", i);
2646 else
2647 init_slots++;
2648 }
2649
2650 if (init_slots) {
2651 dev_info(host->dev, "%d slots initialized\n", init_slots);
2652 } else {
2653 dev_dbg(host->dev, "attempted to initialize %d slots, "
2654 "but failed on all\n", host->num_slots);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002655 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002656 }
2657
Will Newtonf95f3852011-01-02 01:11:59 -05002658 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
Thomas Abraham4a909202012-09-17 18:16:35 +00002659 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002660
2661 return 0;
2662
James Hogan1791b13e2011-06-24 13:55:55 +01002663err_workqueue:
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002664 destroy_workqueue(host->card_workqueue);
James Hogan1791b13e2011-06-24 13:55:55 +01002665
Will Newtonf95f3852011-01-02 01:11:59 -05002666err_dmaunmap:
2667 if (host->use_dma && host->dma_ops->exit)
2668 host->dma_ops->exit(host);
Will Newtonf95f3852011-01-02 01:11:59 -05002669
Doug Anderson870556a2013-06-07 10:28:29 -07002670err_regulator:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002671 if (host->vmmc)
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002672 regulator_disable(host->vmmc);
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002673
2674err_clk_ciu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002675 if (!IS_ERR(host->ciu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002676 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002677
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002678err_clk_biu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002679 if (!IS_ERR(host->biu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002680 clk_disable_unprepare(host->biu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002681
Will Newtonf95f3852011-01-02 01:11:59 -05002682 return ret;
2683}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302684EXPORT_SYMBOL(dw_mci_probe);
Will Newtonf95f3852011-01-02 01:11:59 -05002685
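/* Tear down slots, disable the CIU clock, and release DMA, regulator and clocks */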
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302686void dw_mci_remove(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002687{
Will Newtonf95f3852011-01-02 01:11:59 -05002688 int i;
2689
2690 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2691 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2692
Will Newtonf95f3852011-01-02 01:11:59 -05002693 for (i = 0; i < host->num_slots; i++) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002694 dev_dbg(host->dev, "remove slot %d\n", i);
Will Newtonf95f3852011-01-02 01:11:59 -05002695 if (host->slot[i])
2696 dw_mci_cleanup_slot(host->slot[i], i);
2697 }
2698
2699 /* disable clock to CIU */
2700 mci_writel(host, CLKENA, 0);
2701 mci_writel(host, CLKSRC, 0);
2702
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002703 destroy_workqueue(host->card_workqueue);
Will Newtonf95f3852011-01-02 01:11:59 -05002704
2705 if (host->use_dma && host->dma_ops->exit)
2706 host->dma_ops->exit(host);
2707
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002708 if (host->vmmc)
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002709 regulator_disable(host->vmmc);
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002710
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002711 if (!IS_ERR(host->ciu_clk))
2712 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002713
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002714 if (!IS_ERR(host->biu_clk))
2715 clk_disable_unprepare(host->biu_clk);
Will Newtonf95f3852011-01-02 01:11:59 -05002716}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302717EXPORT_SYMBOL(dw_mci_remove);
2718
2719
Will Newtonf95f3852011-01-02 01:11:59 -05002720
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002721#ifdef CONFIG_PM_SLEEP
Will Newtonf95f3852011-01-02 01:11:59 -05002722/*
2723 * TODO: we should probably disable the clock to the card in the suspend path.
2724 */
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302725int dw_mci_suspend(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002726{
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002727 if (host->vmmc)
2728 regulator_disable(host->vmmc);
2729
Will Newtonf95f3852011-01-02 01:11:59 -05002730 return 0;
2731}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302732EXPORT_SYMBOL(dw_mci_suspend);
Will Newtonf95f3852011-01-02 01:11:59 -05002733
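/* Re-enable the regulator, reset the controller and restore FIFO/interrupt setup */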
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302734int dw_mci_resume(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002735{
2736 int i, ret;
Will Newtonf95f3852011-01-02 01:11:59 -05002737
Sachin Kamatf2f942c2013-04-04 11:25:10 +05302738 if (host->vmmc) {
2739 ret = regulator_enable(host->vmmc);
2740 if (ret) {
2741 dev_err(host->dev,
2742 "failed to enable regulator: %d\n", ret);
2743 return ret;
2744 }
2745 }
Jaehoon Chung1d6c4e02011-05-11 15:52:39 +09002746
Seungwon Jeon31bff452013-08-31 00:14:23 +09002747 if (!dw_mci_ctrl_all_reset(host)) {
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002748 ret = -ENODEV;
2749 return ret;
2750 }
2751
Jonathan Kliegman3bfe6192012-06-14 13:31:55 -04002752 if (host->use_dma && host->dma_ops->init)
Seungwon Jeon141a7122012-05-22 13:01:03 +09002753 host->dma_ops->init(host);
2754
Seungwon Jeon52426892013-08-31 00:13:42 +09002755 /*
2756 * Restore the initial value of the FIFOTH register
2757 * and invalidate prev_blksz by clearing it to zero
2758 */
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002759 mci_writel(host, FIFOTH, host->fifoth_val);
Seungwon Jeon52426892013-08-31 00:13:42 +09002760 host->prev_blksz = 0;
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002761
Doug Anderson2eb29442013-08-31 00:11:49 +09002762 /* Put in max timeout */
2763 mci_writel(host, TMOUT, 0xFFFFFFFF);
2764
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002765 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2766 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2767 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2768 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2769 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2770
Will Newtonf95f3852011-01-02 01:11:59 -05002771 for (i = 0; i < host->num_slots; i++) {
2772 struct dw_mci_slot *slot = host->slot[i];
2773 if (!slot)
2774 continue;
Abhilash Kesavanab269122012-11-19 10:26:21 +05302775 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2776 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2777 dw_mci_setup_bus(slot, true);
2778 }
Will Newtonf95f3852011-01-02 01:11:59 -05002779 }
Will Newtonf95f3852011-01-02 01:11:59 -05002780 return 0;
2781}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302782EXPORT_SYMBOL(dw_mci_resume);
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002783#endif /* CONFIG_PM_SLEEP */
2784
Will Newtonf95f3852011-01-02 01:11:59 -05002785static int __init dw_mci_init(void)
2786{
Sachin Kamat8e1c4e42013-04-04 11:25:11 +05302787 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302788 return 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002789}
2790
2791static void __exit dw_mci_exit(void)
2792{
Will Newtonf95f3852011-01-02 01:11:59 -05002793}
2794
2795module_init(dw_mci_init);
2796module_exit(dw_mci_exit);
2797
2798MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2799MODULE_AUTHOR("NXP Semiconductor VietNam");
2800MODULE_AUTHOR("Imagination Technologies Ltd");
2801MODULE_LICENSE("GPL v2");