/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */

#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

static inline bool dw_mci_fifo_reset(struct dw_mci *host);
static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}

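/*
 * Build the CMDR register value for an mmc_command: flag stop/abort
 * commands, wait for a previous data transfer where required, and encode
 * the expected response type and data transfer direction.  A
 * platform-specific drv_data hook may adjust the final value.
 */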
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;
	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else
		if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
			cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}

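/*
 * Prepare the stop/abort command kept in host->stop_abort for a data
 * command: CMD12 for block reads/writes, or a CCCR I/O abort for SDIO
 * extended transfers.  Returns the corresponding CMDR value, or 0 if no
 * stop command is needed.
 */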
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

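/*
 * Translate the mapped scatterlist into the IDMAC descriptor ring: set the
 * OWN bit and buffer address/length for each entry, suppress interrupts on
 * intermediate descriptors (DIC), and mark the first/last descriptors.
 */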
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	wmb();
}

static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}

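/*
 * Set up the IDMAC descriptor ring in the coherent sg buffer: chain the
 * descriptors, close the ring with an end-of-ring descriptor, reset the
 * IDMAC and unmask only the normal/TX/RX-complete interrupts.
 */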
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */

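/*
 * Map the request's scatterlist for DMA.  Transfers that are too short or
 * not word-aligned are rejected with -EINVAL so they fall back to PIO.
 * When called from the pre_req hook (next == true), the mapping is cached
 * in data->host_cookie for reuse.
 */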
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

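/*
 * Pick the DMA multiple-transaction size (MSIZE) and RX/TX watermarks for
 * the current block size and program them into FIFOTH.  Falls back to the
 * smallest MSIZE when the block size is not a multiple of the FIFO width.
 */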
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}

static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}

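/*
 * Try to hand the data transfer to the DMA engine: map the scatterlist,
 * retune FIFOTH if the block size changed, enable the DMA interface and
 * mask the RX/TX FIFO interrupts.  Returns a negative error if the
 * transfer must be done by PIO instead.
 */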
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If the next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

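/*
 * Issue a bare controller command (e.g. a clock-update command) and poll
 * until the CIU clears the start bit, logging an error if it does not
 * complete within 500 ms.
 */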
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

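/*
 * Program the card clock divider, clock enable (with low-power mode unless
 * SDIO interrupts are in use) and bus width for the slot, informing the CIU
 * after each clock register change.  The clock registers are only touched
 * when the requested clock differs from the current speed or when
 * force_clkinit is set.
 */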
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* remember the clock with the clock divider applied */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		/* Power up slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		/* Power down slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, 0);
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
		read_only = 0;
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else if (gpio_is_valid(slot->wp_gpio))
		read_only = gpio_get_value(slot->wp_gpio);
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	if (present)
		dev_dbg(&mmc->class_dev, "card is present\n");
	else
		dev_dbg(&mmc->class_dev, "card is not present\n");

	return present;
}

/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled. This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
	}
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;
	int err = -ENOSYS;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
		} else {
			return -EINVAL;
		}
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else {
		dev_err(host->dev,
			"Undefined command(%d) for tuning\n", opcode);
		return -EINVAL;
	}

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
	return err;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

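/*
 * Read back the command response registers and translate the raw interrupt
 * status saved in host->cmd_status into an error code on the mmc_command.
 */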
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_err(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_fifo_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

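/*
 * Bottom-half state machine: driven by the events the interrupt handler
 * sets in host->pending_events, it walks a request through the
 * command/data/stop states, issues stop-abort commands on errors and
 * completes the request.
 */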
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			}

			/*
			 * If err is non-zero, a stop-abort
			 * command has already been issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_fifo_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}

/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

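/*
 * PIO FIFO accessors for 16-, 32- and 64-bit data ports.  Each push/pull
 * variant buffers leftover bytes in host->part_buf so that only full
 * FIFO-width words hit the data register, and copies through an aligned
 * bounce buffer when the caller's buffer is not naturally aligned.
 */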
Will Newtonf95f3852011-01-02 01:11:59 -05001430static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1431{
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001432 struct mmc_data *data = host->data;
1433 int init_cnt = cnt;
1434
James Hogan34b664a2011-06-24 13:57:56 +01001435 /* try and push anything in the part_buf */
1436 if (unlikely(host->part_buf_count)) {
1437 int len = dw_mci_push_part_bytes(host, buf, cnt);
1438 buf += len;
1439 cnt -= len;
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001440 if (host->part_buf_count == 2) {
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001441 mci_writew(host, DATA(host->data_offset),
1442 host->part_buf16);
James Hogan34b664a2011-06-24 13:57:56 +01001443 host->part_buf_count = 0;
1444 }
1445 }
1446#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1447 if (unlikely((unsigned long)buf & 0x1)) {
1448 while (cnt >= 2) {
1449 u16 aligned_buf[64];
1450 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1451 int items = len >> 1;
1452 int i;
1453 /* memcpy from input buffer into aligned buffer */
1454 memcpy(aligned_buf, buf, len);
1455 buf += len;
1456 cnt -= len;
1457 /* push data from aligned buffer into fifo */
1458 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001459 mci_writew(host, DATA(host->data_offset),
1460 aligned_buf[i]);
James Hogan34b664a2011-06-24 13:57:56 +01001461 }
1462 } else
1463#endif
1464 {
1465 u16 *pdata = buf;
1466 for (; cnt >= 2; cnt -= 2)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001467 mci_writew(host, DATA(host->data_offset), *pdata++);
James Hogan34b664a2011-06-24 13:57:56 +01001468 buf = pdata;
1469 }
1470 /* put anything remaining in the part_buf */
1471 if (cnt) {
1472 dw_mci_set_part_bytes(host, buf, cnt);
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001473 /* Push data if we have reached the expected data length */
1474 if ((data->bytes_xfered + init_cnt) ==
1475 (data->blksz * data->blocks))
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001476 mci_writew(host, DATA(host->data_offset),
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001477 host->part_buf16);
Will Newtonf95f3852011-01-02 01:11:59 -05001478 }
1479}
1480
1481static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1482{
James Hogan34b664a2011-06-24 13:57:56 +01001483#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1484 if (unlikely((unsigned long)buf & 0x1)) {
1485 while (cnt >= 2) {
1486 /* pull data from fifo into aligned buffer */
1487 u16 aligned_buf[64];
1488 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1489 int items = len >> 1;
1490 int i;
1491 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001492 aligned_buf[i] = mci_readw(host,
1493 DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001494 /* memcpy from aligned buffer into output buffer */
1495 memcpy(buf, aligned_buf, len);
1496 buf += len;
1497 cnt -= len;
1498 }
1499 } else
1500#endif
1501 {
1502 u16 *pdata = buf;
1503 for (; cnt >= 2; cnt -= 2)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001504 *pdata++ = mci_readw(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001505 buf = pdata;
1506 }
1507 if (cnt) {
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001508 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001509 dw_mci_pull_final_bytes(host, buf, cnt);
Will Newtonf95f3852011-01-02 01:11:59 -05001510 }
1511}
1512
1513static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1514{
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001515 struct mmc_data *data = host->data;
1516 int init_cnt = cnt;
1517
James Hogan34b664a2011-06-24 13:57:56 +01001518 /* try and push anything in the part_buf */
1519 if (unlikely(host->part_buf_count)) {
1520 int len = dw_mci_push_part_bytes(host, buf, cnt);
1521 buf += len;
1522 cnt -= len;
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001523 if (host->part_buf_count == 4) {
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001524 mci_writel(host, DATA(host->data_offset),
1525 host->part_buf32);
James Hogan34b664a2011-06-24 13:57:56 +01001526 host->part_buf_count = 0;
1527 }
1528 }
1529#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1530 if (unlikely((unsigned long)buf & 0x3)) {
1531 while (cnt >= 4) {
1532 u32 aligned_buf[32];
1533 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1534 int items = len >> 2;
1535 int i;
1536 /* memcpy from input buffer into aligned buffer */
1537 memcpy(aligned_buf, buf, len);
1538 buf += len;
1539 cnt -= len;
1540 /* push data from aligned buffer into fifo */
1541 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001542 mci_writel(host, DATA(host->data_offset),
1543 aligned_buf[i]);
James Hogan34b664a2011-06-24 13:57:56 +01001544 }
1545 } else
1546#endif
1547 {
1548 u32 *pdata = buf;
1549 for (; cnt >= 4; cnt -= 4)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001550 mci_writel(host, DATA(host->data_offset), *pdata++);
James Hogan34b664a2011-06-24 13:57:56 +01001551 buf = pdata;
1552 }
1553 /* put anything remaining in the part_buf */
1554 if (cnt) {
1555 dw_mci_set_part_bytes(host, buf, cnt);
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001556 /* Push data if we have reached the expected data length */
1557 if ((data->bytes_xfered + init_cnt) ==
1558 (data->blksz * data->blocks))
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001559 mci_writel(host, DATA(host->data_offset),
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001560 host->part_buf32);
Will Newtonf95f3852011-01-02 01:11:59 -05001561 }
1562}
1563
1564static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
1565{
James Hogan34b664a2011-06-24 13:57:56 +01001566#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1567 if (unlikely((unsigned long)buf & 0x3)) {
1568 while (cnt >= 4) {
1569 /* pull data from fifo into aligned buffer */
1570 u32 aligned_buf[32];
1571 int len = min(cnt & -4, (int)sizeof(aligned_buf));
1572 int items = len >> 2;
1573 int i;
1574 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001575 aligned_buf[i] = mci_readl(host,
1576 DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001577 /* memcpy from aligned buffer into output buffer */
1578 memcpy(buf, aligned_buf, len);
1579 buf += len;
1580 cnt -= len;
1581 }
1582 } else
1583#endif
1584 {
1585 u32 *pdata = buf;
1586 for (; cnt >= 4; cnt -= 4)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001587 *pdata++ = mci_readl(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001588 buf = pdata;
1589 }
1590 if (cnt) {
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001591 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001592 dw_mci_pull_final_bytes(host, buf, cnt);
Will Newtonf95f3852011-01-02 01:11:59 -05001593 }
1594}
1595
1596static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
1597{
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001598 struct mmc_data *data = host->data;
1599 int init_cnt = cnt;
1600
James Hogan34b664a2011-06-24 13:57:56 +01001601	/* try to push anything in the part_buf */
1602 if (unlikely(host->part_buf_count)) {
1603 int len = dw_mci_push_part_bytes(host, buf, cnt);
1604 buf += len;
1605 cnt -= len;
Seungwon Jeonc09fbd72013-03-25 16:28:22 +09001606
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001607 if (host->part_buf_count == 8) {
Seungwon Jeonc09fbd72013-03-25 16:28:22 +09001608 mci_writeq(host, DATA(host->data_offset),
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001609 host->part_buf);
James Hogan34b664a2011-06-24 13:57:56 +01001610 host->part_buf_count = 0;
1611 }
1612 }
1613#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1614 if (unlikely((unsigned long)buf & 0x7)) {
1615 while (cnt >= 8) {
1616 u64 aligned_buf[16];
1617 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1618 int items = len >> 3;
1619 int i;
1620 /* memcpy from input buffer into aligned buffer */
1621 memcpy(aligned_buf, buf, len);
1622 buf += len;
1623 cnt -= len;
1624 /* push data from aligned buffer into fifo */
1625 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001626 mci_writeq(host, DATA(host->data_offset),
1627 aligned_buf[i]);
James Hogan34b664a2011-06-24 13:57:56 +01001628 }
1629 } else
1630#endif
1631 {
1632 u64 *pdata = buf;
1633 for (; cnt >= 8; cnt -= 8)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001634 mci_writeq(host, DATA(host->data_offset), *pdata++);
James Hogan34b664a2011-06-24 13:57:56 +01001635 buf = pdata;
1636 }
1637 /* put anything remaining in the part_buf */
1638 if (cnt) {
1639 dw_mci_set_part_bytes(host, buf, cnt);
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001640 /* Push data if we have reached the expected data length */
1641 if ((data->bytes_xfered + init_cnt) ==
1642 (data->blksz * data->blocks))
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001643 mci_writeq(host, DATA(host->data_offset),
Markos Chandrascfbeb59c2013-03-12 10:53:13 +00001644 host->part_buf);
Will Newtonf95f3852011-01-02 01:11:59 -05001645 }
1646}
1647
1648static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1649{
James Hogan34b664a2011-06-24 13:57:56 +01001650#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1651 if (unlikely((unsigned long)buf & 0x7)) {
1652 while (cnt >= 8) {
1653 /* pull data from fifo into aligned buffer */
1654 u64 aligned_buf[16];
1655 int len = min(cnt & -8, (int)sizeof(aligned_buf));
1656 int items = len >> 3;
1657 int i;
1658 for (i = 0; i < items; ++i)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001659 aligned_buf[i] = mci_readq(host,
1660 DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001661 /* memcpy from aligned buffer into output buffer */
1662 memcpy(buf, aligned_buf, len);
1663 buf += len;
1664 cnt -= len;
1665 }
1666 } else
1667#endif
1668 {
1669 u64 *pdata = buf;
1670 for (; cnt >= 8; cnt -= 8)
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001671 *pdata++ = mci_readq(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001672 buf = pdata;
Will Newtonf95f3852011-01-02 01:11:59 -05001673 }
James Hogan34b664a2011-06-24 13:57:56 +01001674 if (cnt) {
Jaehoon Chung4e0a5ad2011-10-17 19:36:23 +09001675 host->part_buf = mci_readq(host, DATA(host->data_offset));
James Hogan34b664a2011-06-24 13:57:56 +01001676 dw_mci_pull_final_bytes(host, buf, cnt);
1677 }
1678}
1679
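/*
 * Generic pull helper: first drain any bytes still held in the partial
 * buffer, then let the width-specific pull routine (selected from HCON
 * at probe time) read the remainder directly from the FIFO.
 */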
1680static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1681{
1682 int len;
1683
1684 /* get remaining partial bytes */
1685 len = dw_mci_pull_part_bytes(host, buf, cnt);
1686 if (unlikely(len == cnt))
1687 return;
1688 buf += len;
1689 cnt -= len;
1690
1691 /* get the rest of the data */
1692 host->pull_data(host, buf, cnt);
Will Newtonf95f3852011-01-02 01:11:59 -05001693}
1694
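/*
 * PIO read path: walk the scatter-gather list with sg_miter and copy as
 * much data as the FIFO currently holds into each segment.  Called from
 * the interrupt handler on RXDR, and on data-over with @dto set so that
 * whatever is left in the FIFO gets drained.
 */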
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001695static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
Will Newtonf95f3852011-01-02 01:11:59 -05001696{
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001697 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1698 void *buf;
1699 unsigned int offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001700 struct mmc_data *data = host->data;
1701 int shift = host->data_shift;
1702 u32 status;
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001703 unsigned int len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001704 unsigned int remain, fcnt;
Will Newtonf95f3852011-01-02 01:11:59 -05001705
1706 do {
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001707 if (!sg_miter_next(sg_miter))
1708 goto done;
Will Newtonf95f3852011-01-02 01:11:59 -05001709
Imre Deak4225fc82013-02-27 17:02:57 -08001710 host->sg = sg_miter->piter.sg;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001711 buf = sg_miter->addr;
1712 remain = sg_miter->length;
1713 offset = 0;
1714
1715 do {
1716 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
1717 << shift) + host->part_buf_count;
1718 len = min(remain, fcnt);
1719 if (!len)
1720 break;
1721 dw_mci_pull_data(host, (void *)(buf + offset), len);
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001722 data->bytes_xfered += len;
Will Newtonf95f3852011-01-02 01:11:59 -05001723 offset += len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001724 remain -= len;
1725 } while (remain);
Will Newtonf95f3852011-01-02 01:11:59 -05001726
Seungwon Jeone74f3a92012-08-01 09:30:46 +09001727 sg_miter->consumed = offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001728 status = mci_readl(host, MINTSTS);
1729 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001730		/* if the RXDR is ready, read again */
1731 } while ((status & SDMMC_INT_RXDR) ||
1732 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001733
1734 if (!remain) {
1735 if (!sg_miter_next(sg_miter))
1736 goto done;
1737 sg_miter->consumed = 0;
1738 }
1739 sg_miter_stop(sg_miter);
Will Newtonf95f3852011-01-02 01:11:59 -05001740 return;
1741
1742done:
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001743 sg_miter_stop(sg_miter);
1744 host->sg = NULL;
Will Newtonf95f3852011-01-02 01:11:59 -05001745 smp_wmb();
1746 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1747}
1748
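/*
 * PIO write path: the mirror image of dw_mci_read_data_pio().  Fill the
 * FIFO with as many bytes as it has room for from the current
 * scatter-gather segment, and keep going while TXDR stays asserted.
 */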
1749static void dw_mci_write_data_pio(struct dw_mci *host)
1750{
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001751 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1752 void *buf;
1753 unsigned int offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001754 struct mmc_data *data = host->data;
1755 int shift = host->data_shift;
1756 u32 status;
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001757 unsigned int len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001758 unsigned int fifo_depth = host->fifo_depth;
1759 unsigned int remain, fcnt;
Will Newtonf95f3852011-01-02 01:11:59 -05001760
1761 do {
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001762 if (!sg_miter_next(sg_miter))
1763 goto done;
Will Newtonf95f3852011-01-02 01:11:59 -05001764
Imre Deak4225fc82013-02-27 17:02:57 -08001765 host->sg = sg_miter->piter.sg;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001766 buf = sg_miter->addr;
1767 remain = sg_miter->length;
1768 offset = 0;
1769
1770 do {
1771 fcnt = ((fifo_depth -
1772 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
1773 << shift) - host->part_buf_count;
1774 len = min(remain, fcnt);
1775 if (!len)
1776 break;
1777 host->push_data(host, (void *)(buf + offset), len);
Markos Chandras3e4b0d82013-03-22 12:50:05 -04001778 data->bytes_xfered += len;
Will Newtonf95f3852011-01-02 01:11:59 -05001779 offset += len;
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001780 remain -= len;
1781 } while (remain);
Will Newtonf95f3852011-01-02 01:11:59 -05001782
Seungwon Jeone74f3a92012-08-01 09:30:46 +09001783 sg_miter->consumed = offset;
Will Newtonf95f3852011-01-02 01:11:59 -05001784 status = mci_readl(host, MINTSTS);
1785 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
Will Newtonf95f3852011-01-02 01:11:59 -05001786	} while (status & SDMMC_INT_TXDR); /* if TXDR is set, write again */
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001787
1788 if (!remain) {
1789 if (!sg_miter_next(sg_miter))
1790 goto done;
1791 sg_miter->consumed = 0;
1792 }
1793 sg_miter_stop(sg_miter);
Will Newtonf95f3852011-01-02 01:11:59 -05001794 return;
1795
1796done:
Seungwon Jeonf9c2a0d2012-02-09 14:32:43 +09001797 sg_miter_stop(sg_miter);
1798 host->sg = NULL;
Will Newtonf95f3852011-01-02 01:11:59 -05001799 smp_wmb();
1800 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1801}
1802
1803static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1804{
1805 if (!host->cmd_status)
1806 host->cmd_status = status;
1807
1808 smp_wmb();
1809
1810 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1811 tasklet_schedule(&host->tasklet);
1812}
1813
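/*
 * Top-level interrupt handler.  Command and data events are only
 * recorded in host->pending_events here; the heavy lifting is deferred
 * to the tasklet.  PIO data movement and SDIO interrupts are handled
 * inline, while card-detect events are pushed to the card workqueue.
 */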
1814static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1815{
1816 struct dw_mci *host = dev_id;
Seungwon Jeon182c9082012-08-01 09:30:30 +09001817 u32 pending;
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301818 int i;
Will Newtonf95f3852011-01-02 01:11:59 -05001819
Markos Chandras1fb5f682013-03-12 10:53:11 +00001820 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1821
Doug Anderson476d79f2013-07-09 13:04:40 -07001822 /*
1823 * DTO fix - version 2.10a and below, and only if internal DMA
1824 * is configured.
1825 */
1826 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1827 if (!pending &&
1828 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1829 pending |= SDMMC_INT_DATA_OVER;
1830 }
1831
Markos Chandras1fb5f682013-03-12 10:53:11 +00001832 if (pending) {
Will Newtonf95f3852011-01-02 01:11:59 -05001833 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1834 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001835 host->cmd_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001836 smp_wmb();
1837 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
Will Newtonf95f3852011-01-02 01:11:59 -05001838 }
1839
1840 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1841			/* if there is an error, report DATA_ERROR */
1842 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001843 host->data_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001844 smp_wmb();
1845 set_bit(EVENT_DATA_ERROR, &host->pending_events);
Seungwon Jeon9b2026a2012-08-01 09:30:40 +09001846 tasklet_schedule(&host->tasklet);
Will Newtonf95f3852011-01-02 01:11:59 -05001847 }
1848
1849 if (pending & SDMMC_INT_DATA_OVER) {
1850 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1851 if (!host->data_status)
Seungwon Jeon182c9082012-08-01 09:30:30 +09001852 host->data_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001853 smp_wmb();
1854 if (host->dir_status == DW_MCI_RECV_STATUS) {
1855 if (host->sg != NULL)
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001856 dw_mci_read_data_pio(host, true);
Will Newtonf95f3852011-01-02 01:11:59 -05001857 }
1858 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1859 tasklet_schedule(&host->tasklet);
1860 }
1861
1862 if (pending & SDMMC_INT_RXDR) {
1863 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
James Hoganb40af3a2011-06-24 13:54:06 +01001864 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001865 dw_mci_read_data_pio(host, false);
Will Newtonf95f3852011-01-02 01:11:59 -05001866 }
1867
1868 if (pending & SDMMC_INT_TXDR) {
1869 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
James Hoganb40af3a2011-06-24 13:54:06 +01001870 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
Will Newtonf95f3852011-01-02 01:11:59 -05001871 dw_mci_write_data_pio(host);
1872 }
1873
1874 if (pending & SDMMC_INT_CMD_DONE) {
1875 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001876 dw_mci_cmd_interrupt(host, pending);
Will Newtonf95f3852011-01-02 01:11:59 -05001877 }
1878
1879 if (pending & SDMMC_INT_CD) {
1880 mci_writel(host, RINTSTS, SDMMC_INT_CD);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07001881 queue_work(host->card_workqueue, &host->card_work);
Will Newtonf95f3852011-01-02 01:11:59 -05001882 }
1883
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301884 /* Handle SDIO Interrupts */
1885 for (i = 0; i < host->num_slots; i++) {
1886 struct dw_mci_slot *slot = host->slot[i];
1887 if (pending & SDMMC_INT_SDIO(i)) {
1888 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1889 mmc_signal_sdio_irq(slot->mmc);
1890 }
1891 }
1892
Markos Chandras1fb5f682013-03-12 10:53:11 +00001893 }
Will Newtonf95f3852011-01-02 01:11:59 -05001894
1895#ifdef CONFIG_MMC_DW_IDMAC
1896 /* Handle DMA interrupts */
1897 pending = mci_readl(host, IDSTS);
1898 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1899 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1900 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
Will Newtonf95f3852011-01-02 01:11:59 -05001901 host->dma_ops->complete(host);
1902 }
1903#endif
1904
1905 return IRQ_HANDLED;
1906}
1907
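/*
 * Card-detect workqueue handler.  For every slot whose presence state
 * changed, abort any request still in flight with -ENOMEDIUM, reset the
 * FIFO (and IDMAC) on removal, and finally let the MMC core rescan the
 * slot via mmc_detect_change().
 */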
James Hogan1791b13e2011-06-24 13:55:55 +01001908static void dw_mci_work_routine_card(struct work_struct *work)
Will Newtonf95f3852011-01-02 01:11:59 -05001909{
James Hogan1791b13e2011-06-24 13:55:55 +01001910 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
Will Newtonf95f3852011-01-02 01:11:59 -05001911 int i;
1912
1913 for (i = 0; i < host->num_slots; i++) {
1914 struct dw_mci_slot *slot = host->slot[i];
1915 struct mmc_host *mmc = slot->mmc;
1916 struct mmc_request *mrq;
1917 int present;
Will Newtonf95f3852011-01-02 01:11:59 -05001918
1919 present = dw_mci_get_cd(mmc);
1920 while (present != slot->last_detect_state) {
Will Newtonf95f3852011-01-02 01:11:59 -05001921 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1922 present ? "inserted" : "removed");
1923
James Hogan1791b13e2011-06-24 13:55:55 +01001924 spin_lock_bh(&host->lock);
1925
Will Newtonf95f3852011-01-02 01:11:59 -05001926 /* Card change detected */
1927 slot->last_detect_state = present;
1928
James Hogan1791b13e2011-06-24 13:55:55 +01001929 /* Mark card as present if applicable */
1930 if (present != 0)
Will Newtonf95f3852011-01-02 01:11:59 -05001931 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
Will Newtonf95f3852011-01-02 01:11:59 -05001932
1933 /* Clean up queue if present */
1934 mrq = slot->mrq;
1935 if (mrq) {
1936 if (mrq == host->mrq) {
1937 host->data = NULL;
1938 host->cmd = NULL;
1939
1940 switch (host->state) {
1941 case STATE_IDLE:
1942 break;
1943 case STATE_SENDING_CMD:
1944 mrq->cmd->error = -ENOMEDIUM;
1945 if (!mrq->data)
1946 break;
1947 /* fall through */
1948 case STATE_SENDING_DATA:
1949 mrq->data->error = -ENOMEDIUM;
1950 dw_mci_stop_dma(host);
1951 break;
1952 case STATE_DATA_BUSY:
1953 case STATE_DATA_ERROR:
1954 if (mrq->data->error == -EINPROGRESS)
1955 mrq->data->error = -ENOMEDIUM;
Will Newtonf95f3852011-01-02 01:11:59 -05001956 /* fall through */
1957 case STATE_SENDING_STOP:
Seungwon Jeon90c21432013-08-31 00:14:05 +09001958 if (mrq->stop)
1959 mrq->stop->error = -ENOMEDIUM;
Will Newtonf95f3852011-01-02 01:11:59 -05001960 break;
1961 }
1962
1963 dw_mci_request_end(host, mrq);
1964 } else {
1965 list_del(&slot->queue_node);
1966 mrq->cmd->error = -ENOMEDIUM;
1967 if (mrq->data)
1968 mrq->data->error = -ENOMEDIUM;
1969 if (mrq->stop)
1970 mrq->stop->error = -ENOMEDIUM;
1971
1972 spin_unlock(&host->lock);
1973 mmc_request_done(slot->mmc, mrq);
1974 spin_lock(&host->lock);
1975 }
1976 }
1977
1978 /* Power down slot */
1979 if (present == 0) {
Will Newtonf95f3852011-01-02 01:11:59 -05001980 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1981
Seungwon Jeon31bff452013-08-31 00:14:23 +09001982 /* Clear down the FIFO */
1983 dw_mci_fifo_reset(host);
Will Newtonf95f3852011-01-02 01:11:59 -05001984#ifdef CONFIG_MMC_DW_IDMAC
Seungwon Jeon5ce9d962013-08-31 00:14:33 +09001985 dw_mci_idmac_reset(host);
Will Newtonf95f3852011-01-02 01:11:59 -05001986#endif
1987
1988 }
1989
James Hogan1791b13e2011-06-24 13:55:55 +01001990 spin_unlock_bh(&host->lock);
1991
Will Newtonf95f3852011-01-02 01:11:59 -05001992 present = dw_mci_get_cd(mmc);
1993 }
1994
1995 mmc_detect_change(slot->mmc,
1996 msecs_to_jiffies(host->pdata->detect_delay_ms));
1997 }
1998}
1999
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002000#ifdef CONFIG_OF
2001/* given a slot id, find out the device node representing that slot */
2002static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2003{
2004 struct device_node *np;
2005 const __be32 *addr;
2006 int len;
2007
2008 if (!dev || !dev->of_node)
2009 return NULL;
2010
2011 for_each_child_of_node(dev->of_node, np) {
2012 addr = of_get_property(np, "reg", &len);
2013 if (!addr || (len < sizeof(int)))
2014 continue;
2015 if (be32_to_cpup(addr) == slot)
2016 return np;
2017 }
2018 return NULL;
2019}
2020
Doug Andersona70aaa62013-01-11 17:03:50 +00002021static struct dw_mci_of_slot_quirks {
2022 char *quirk;
2023 int id;
2024} of_slot_quirks[] = {
2025 {
2026 .quirk = "disable-wp",
2027 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2028 },
2029};
2030
2031static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2032{
2033 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2034 int quirks = 0;
2035 int idx;
2036
2037 /* get quirks */
2038 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2039 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2040 quirks |= of_slot_quirks[idx].id;
2041
2042 return quirks;
2043}
2044
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002045/* find out bus-width for a given slot */
2046static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2047{
2048 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2049 u32 bus_wd = 1;
2050
2051 if (!np)
2052 return 1;
2053
2054 if (of_property_read_u32(np, "bus-width", &bus_wd))
2055 dev_err(dev, "bus-width property not found, assuming width"
2056 " as 1\n");
2057 return bus_wd;
2058}
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002059
2060/* find the write protect gpio for a given slot; or -1 if none specified */
2061static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2062{
2063 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2064 int gpio;
2065
2066 if (!np)
2067 return -EINVAL;
2068
2069 gpio = of_get_named_gpio(np, "wp-gpios", 0);
2070
2071 /* Having a missing entry is valid; return silently */
2072 if (!gpio_is_valid(gpio))
2073 return -EINVAL;
2074
2075 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2076 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2077 return -EINVAL;
2078 }
2079
2080 return gpio;
2081}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002082#else /* CONFIG_OF */
Doug Andersona70aaa62013-01-11 17:03:50 +00002083static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2084{
2085 return 0;
2086}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002087static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2088{
2089 return 1;
2090}
2091static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2092{
2093 return NULL;
2094}
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002095static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2096{
2097 return -EINVAL;
2098}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002099#endif /* CONFIG_OF */
2100
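/*
 * Allocate and register one mmc_host per slot.  Capabilities, bus
 * width, OCR mask and block-size limits come from platform data and/or
 * the device tree; the slot is marked present according to the current
 * card-detect state before the host is added.
 */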
Jaehoon Chung36c179a2012-08-23 20:31:48 +09002101static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
Will Newtonf95f3852011-01-02 01:11:59 -05002102{
2103 struct mmc_host *mmc;
2104 struct dw_mci_slot *slot;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002105 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002106 int ctrl_id, ret;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002107 u32 freq[2];
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002108 u8 bus_width;
Will Newtonf95f3852011-01-02 01:11:59 -05002109
Thomas Abraham4a909202012-09-17 18:16:35 +00002110 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
Will Newtonf95f3852011-01-02 01:11:59 -05002111 if (!mmc)
2112 return -ENOMEM;
2113
2114 slot = mmc_priv(mmc);
2115 slot->id = id;
2116 slot->mmc = mmc;
2117 slot->host = host;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002118 host->slot[id] = slot;
Will Newtonf95f3852011-01-02 01:11:59 -05002119
Doug Andersona70aaa62013-01-11 17:03:50 +00002120 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2121
Will Newtonf95f3852011-01-02 01:11:59 -05002122 mmc->ops = &dw_mci_ops;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002123 if (of_property_read_u32_array(host->dev->of_node,
2124 "clock-freq-min-max", freq, 2)) {
2125 mmc->f_min = DW_MCI_FREQ_MIN;
2126 mmc->f_max = DW_MCI_FREQ_MAX;
2127 } else {
2128 mmc->f_min = freq[0];
2129 mmc->f_max = freq[1];
2130 }
Will Newtonf95f3852011-01-02 01:11:59 -05002131
2132 if (host->pdata->get_ocr)
2133 mmc->ocr_avail = host->pdata->get_ocr(id);
2134 else
2135 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2136
2137 /*
2138	 * Start with slot power disabled; it will be enabled when a card
2139	 * is detected.
2140 */
2141 if (host->pdata->setpower)
2142 host->pdata->setpower(id, 0);
2143
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002144 if (host->pdata->caps)
2145 mmc->caps = host->pdata->caps;
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002146
Abhilash Kesavanab269122012-11-19 10:26:21 +05302147 if (host->pdata->pm_caps)
2148 mmc->pm_caps = host->pdata->pm_caps;
2149
Thomas Abraham800d78b2012-09-17 18:16:42 +00002150 if (host->dev->of_node) {
2151 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2152 if (ctrl_id < 0)
2153 ctrl_id = 0;
2154 } else {
2155 ctrl_id = to_platform_device(host->dev)->id;
2156 }
James Hogancb27a842012-10-16 09:43:08 +01002157 if (drv_data && drv_data->caps)
2158 mmc->caps |= drv_data->caps[ctrl_id];
Thomas Abraham800d78b2012-09-17 18:16:42 +00002159
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002160 if (host->pdata->caps2)
2161 mmc->caps2 = host->pdata->caps2;
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002162
Will Newtonf95f3852011-01-02 01:11:59 -05002163 if (host->pdata->get_bus_wd)
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002164 bus_width = host->pdata->get_bus_wd(slot->id);
2165 else if (host->dev->of_node)
2166 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2167 else
2168 bus_width = 1;
2169
2170 switch (bus_width) {
2171 case 8:
2172 mmc->caps |= MMC_CAP_8_BIT_DATA;
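		/* fall through - 8-bit data also implies 4-bit support */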
2173 case 4:
2174 mmc->caps |= MMC_CAP_4_BIT_DATA;
2175 }
Will Newtonf95f3852011-01-02 01:11:59 -05002176
Will Newtonf95f3852011-01-02 01:11:59 -05002177 if (host->pdata->blk_settings) {
2178 mmc->max_segs = host->pdata->blk_settings->max_segs;
2179 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2180 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2181 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2182 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2183 } else {
2184 /* Useful defaults if platform data is unset. */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002185#ifdef CONFIG_MMC_DW_IDMAC
2186 mmc->max_segs = host->ring_size;
2187 mmc->max_blk_size = 65536;
2188 mmc->max_blk_count = host->ring_size;
2189 mmc->max_seg_size = 0x1000;
2190 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2191#else
Will Newtonf95f3852011-01-02 01:11:59 -05002192 mmc->max_segs = 64;
2193 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2194 mmc->max_blk_count = 512;
2195 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2196 mmc->max_seg_size = mmc->max_req_size;
Will Newtonf95f3852011-01-02 01:11:59 -05002197#endif /* CONFIG_MMC_DW_IDMAC */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002198 }
Will Newtonf95f3852011-01-02 01:11:59 -05002199
2200 if (dw_mci_get_cd(mmc))
2201 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2202 else
2203 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2204
Doug Anderson55a6ceb2013-01-11 17:03:53 +00002205 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2206
Jaehoon Chung0cea5292013-02-15 23:45:45 +09002207 ret = mmc_add_host(mmc);
2208 if (ret)
2209 goto err_setup_bus;
Will Newtonf95f3852011-01-02 01:11:59 -05002210
2211#if defined(CONFIG_DEBUG_FS)
2212 dw_mci_init_debugfs(slot);
2213#endif
2214
2215 /* Card initially undetected */
2216 slot->last_detect_state = 0;
2217
Will Newtonf95f3852011-01-02 01:11:59 -05002218 return 0;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002219
2220err_setup_bus:
2221 mmc_free_host(mmc);
2222 return -EINVAL;
Will Newtonf95f3852011-01-02 01:11:59 -05002223}
2224
2225static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2226{
2227 /* Shutdown detect IRQ */
2228 if (slot->host->pdata->exit)
2229 slot->host->pdata->exit(id);
2230
2231 /* Debugfs stuff is cleaned up by mmc core */
2232 mmc_remove_host(slot->mmc);
2233 slot->host->slot[id] = NULL;
2234 mmc_free_host(slot->mmc);
2235}
2236
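/*
 * Set up the data transfer mechanism: allocate the page used for
 * descriptor/sg translation and hook up the internal DMA controller
 * when CONFIG_MMC_DW_IDMAC is enabled, falling back to PIO if no usable
 * DMA interface is found.
 */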
2237static void dw_mci_init_dma(struct dw_mci *host)
2238{
2239 /* Alloc memory for sg translation */
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002240 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
Will Newtonf95f3852011-01-02 01:11:59 -05002241 &host->sg_dma, GFP_KERNEL);
2242 if (!host->sg_cpu) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002243 dev_err(host->dev, "%s: could not alloc DMA memory\n",
Will Newtonf95f3852011-01-02 01:11:59 -05002244 __func__);
2245 goto no_dma;
2246 }
2247
2248 /* Determine which DMA interface to use */
2249#ifdef CONFIG_MMC_DW_IDMAC
2250 host->dma_ops = &dw_mci_idmac_ops;
Seungwon Jeon00956ea2012-09-28 19:13:11 +09002251 dev_info(host->dev, "Using internal DMA controller.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002252#endif
2253
2254 if (!host->dma_ops)
2255 goto no_dma;
2256
Jaehoon Chunge1631f92012-04-18 15:42:31 +09002257 if (host->dma_ops->init && host->dma_ops->start &&
2258 host->dma_ops->stop && host->dma_ops->cleanup) {
Will Newtonf95f3852011-01-02 01:11:59 -05002259 if (host->dma_ops->init(host)) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002260 dev_err(host->dev, "%s: Unable to initialize "
Will Newtonf95f3852011-01-02 01:11:59 -05002261 "DMA Controller.\n", __func__);
2262 goto no_dma;
2263 }
2264 } else {
Thomas Abraham4a909202012-09-17 18:16:35 +00002265 dev_err(host->dev, "DMA initialization not found.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002266 goto no_dma;
2267 }
2268
2269 host->use_dma = 1;
2270 return;
2271
2272no_dma:
Thomas Abraham4a909202012-09-17 18:16:35 +00002273 dev_info(host->dev, "Using PIO mode.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002274 host->use_dma = 0;
2275 return;
2276}
2277
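/*
 * Assert the requested reset bits in CTRL and wait (up to 500 ms) for
 * the controller to clear them again.  Returns false and logs an error
 * on timeout.
 */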
Seungwon Jeon31bff452013-08-31 00:14:23 +09002278static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
Will Newtonf95f3852011-01-02 01:11:59 -05002279{
2280 unsigned long timeout = jiffies + msecs_to_jiffies(500);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002281 u32 ctrl;
Will Newtonf95f3852011-01-02 01:11:59 -05002282
Seungwon Jeon31bff452013-08-31 00:14:23 +09002283 ctrl = mci_readl(host, CTRL);
2284 ctrl |= reset;
2285 mci_writel(host, CTRL, ctrl);
Will Newtonf95f3852011-01-02 01:11:59 -05002286
2287 /* wait till resets clear */
2288 do {
2289 ctrl = mci_readl(host, CTRL);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002290 if (!(ctrl & reset))
Will Newtonf95f3852011-01-02 01:11:59 -05002291 return true;
2292 } while (time_before(jiffies, timeout));
2293
Seungwon Jeon31bff452013-08-31 00:14:23 +09002294 dev_err(host->dev,
2295 "Timeout resetting block (ctrl reset %#x)\n",
2296 ctrl & reset);
Will Newtonf95f3852011-01-02 01:11:59 -05002297
2298 return false;
2299}
2300
Seungwon Jeon31bff452013-08-31 00:14:23 +09002301static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2302{
2303 /*
2304	 * Resetting generates a block interrupt, hence we set
2305	 * the scatter-gather pointer to NULL first.
2306 */
2307 if (host->sg) {
2308 sg_miter_stop(&host->sg_miter);
2309 host->sg = NULL;
2310 }
2311
2312 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2313}
2314
2315static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2316{
2317 return dw_mci_ctrl_reset(host,
2318 SDMMC_CTRL_FIFO_RESET |
2319 SDMMC_CTRL_RESET |
2320 SDMMC_CTRL_DMA_RESET);
2321}
2322
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002323#ifdef CONFIG_OF
2324static struct dw_mci_of_quirks {
2325 char *quirk;
2326 int id;
2327} of_quirks[] = {
2328 {
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002329 .quirk = "broken-cd",
2330 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
2331 },
2332};
2333
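/*
 * Build a dw_mci_board structure from the controller's device-tree
 * node.  An illustrative node using only the properties parsed below
 * and by the slot helpers above (names and values are just an example;
 * controller-level properties such as compatible, reg and clocks are
 * handled by the bus glue and omitted here):
 *
 *	mshc0 {
 *		num-slots = <1>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <100000000>;
 *		broken-cd;
 *		supports-highspeed;
 *
 *		slot@0 {
 *			reg = <0>;
 *			bus-width = <4>;
 *		};
 *	};
 */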
2334static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2335{
2336 struct dw_mci_board *pdata;
2337 struct device *dev = host->dev;
2338 struct device_node *np = dev->of_node;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002339 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002340 int idx, ret;
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002341 u32 clock_frequency;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002342
2343 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2344 if (!pdata) {
2345 dev_err(dev, "could not allocate memory for pdata\n");
2346 return ERR_PTR(-ENOMEM);
2347 }
2348
2349 /* find out number of slots supported */
2350 if (of_property_read_u32(dev->of_node, "num-slots",
2351 &pdata->num_slots)) {
2352 dev_info(dev, "num-slots property not found, "
2353 "assuming 1 slot is available\n");
2354 pdata->num_slots = 1;
2355 }
2356
2357 /* get quirks */
2358 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
2359 if (of_get_property(np, of_quirks[idx].quirk, NULL))
2360 pdata->quirks |= of_quirks[idx].id;
2361
2362 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
2363 dev_info(dev, "fifo-depth property not found, using "
2364 "value of FIFOTH register as default\n");
2365
2366 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
2367
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002368 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
2369 pdata->bus_hz = clock_frequency;
2370
James Hogancb27a842012-10-16 09:43:08 +01002371 if (drv_data && drv_data->parse_dt) {
2372 ret = drv_data->parse_dt(host);
Thomas Abraham800d78b2012-09-17 18:16:42 +00002373 if (ret)
2374 return ERR_PTR(ret);
2375 }
2376
Abhilash Kesavanab269122012-11-19 10:26:21 +05302377 if (of_find_property(np, "keep-power-in-suspend", NULL))
2378 pdata->pm_caps |= MMC_PM_KEEP_POWER;
2379
2380 if (of_find_property(np, "enable-sdio-wakeup", NULL))
2381 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2382
Seungwon Jeon10b49842013-08-31 00:13:22 +09002383 if (of_find_property(np, "supports-highspeed", NULL))
2384 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
2385
Seungwon Jeon5dd63f52013-08-31 00:13:09 +09002386 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
2387 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
2388
2389 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
2390 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
2391
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002392 return pdata;
2393}
2394
2395#else /* CONFIG_OF */
2396static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2397{
2398 return ERR_PTR(-EINVAL);
2399}
2400#endif /* CONFIG_OF */
2401
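/*
 * Core probe path, exported for the bus glue drivers to call: parse
 * platform data or the device tree, enable the biu/ciu clocks and the
 * optional vmmc regulator, reset the controller, set up DMA, FIFO
 * thresholds and interrupts, and finally register each slot.
 */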
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302402int dw_mci_probe(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002403{
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002404 const struct dw_mci_drv_data *drv_data = host->drv_data;
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302405 int width, i, ret = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002406 u32 fifo_size;
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002407 int init_slots = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002408
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002409 if (!host->pdata) {
2410 host->pdata = dw_mci_parse_dt(host);
2411 if (IS_ERR(host->pdata)) {
2412 dev_err(host->dev, "platform data not available\n");
2413 return -EINVAL;
2414 }
Will Newtonf95f3852011-01-02 01:11:59 -05002415 }
2416
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302417 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002418 dev_err(host->dev,
Will Newtonf95f3852011-01-02 01:11:59 -05002419 "Platform data must supply select_slot function\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302420 return -ENODEV;
Will Newtonf95f3852011-01-02 01:11:59 -05002421 }
2422
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002423 host->biu_clk = devm_clk_get(host->dev, "biu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002424 if (IS_ERR(host->biu_clk)) {
2425 dev_dbg(host->dev, "biu clock not available\n");
2426 } else {
2427 ret = clk_prepare_enable(host->biu_clk);
2428 if (ret) {
2429 dev_err(host->dev, "failed to enable biu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002430 return ret;
2431 }
Will Newtonf95f3852011-01-02 01:11:59 -05002432 }
2433
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002434 host->ciu_clk = devm_clk_get(host->dev, "ciu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002435 if (IS_ERR(host->ciu_clk)) {
2436 dev_dbg(host->dev, "ciu clock not available\n");
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002437 host->bus_hz = host->pdata->bus_hz;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002438 } else {
2439 ret = clk_prepare_enable(host->ciu_clk);
2440 if (ret) {
2441 dev_err(host->dev, "failed to enable ciu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002442 goto err_clk_biu;
2443 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002444
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002445 if (host->pdata->bus_hz) {
2446 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2447 if (ret)
2448 dev_warn(host->dev,
2449					 "Unable to set bus rate to %uHz\n",
2450 host->pdata->bus_hz);
2451 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002452 host->bus_hz = clk_get_rate(host->ciu_clk);
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002453 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002454
Yuvaraj Kumar C D002f0d52013-08-31 00:12:19 +09002455 if (drv_data && drv_data->init) {
2456 ret = drv_data->init(host);
2457 if (ret) {
2458 dev_err(host->dev,
2459 "implementation specific init failed\n");
2460 goto err_clk_ciu;
2461 }
2462 }
2463
James Hogancb27a842012-10-16 09:43:08 +01002464 if (drv_data && drv_data->setup_clock) {
2465 ret = drv_data->setup_clock(host);
Thomas Abraham800d78b2012-09-17 18:16:42 +00002466 if (ret) {
2467 dev_err(host->dev,
2468 "implementation specific clock setup failed\n");
2469 goto err_clk_ciu;
2470 }
2471 }
2472
Mark Browna55d6ff2013-07-29 21:55:27 +01002473 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
Doug Anderson870556a2013-06-07 10:28:29 -07002474 if (IS_ERR(host->vmmc)) {
2475 ret = PTR_ERR(host->vmmc);
2476 if (ret == -EPROBE_DEFER)
2477 goto err_clk_ciu;
2478
2479 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2480 host->vmmc = NULL;
2481 } else {
2482 ret = regulator_enable(host->vmmc);
2483 if (ret) {
2484 if (ret != -EPROBE_DEFER)
2485 dev_err(host->dev,
2486 "regulator_enable fail: %d\n", ret);
2487 goto err_clk_ciu;
2488 }
2489 }
2490
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002491 if (!host->bus_hz) {
2492 dev_err(host->dev,
2493 "Platform data must supply bus speed\n");
2494 ret = -ENODEV;
Doug Anderson870556a2013-06-07 10:28:29 -07002495 goto err_regulator;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002496 }
2497
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302498 host->quirks = host->pdata->quirks;
Will Newtonf95f3852011-01-02 01:11:59 -05002499
2500 spin_lock_init(&host->lock);
2501 INIT_LIST_HEAD(&host->queue);
2502
Will Newtonf95f3852011-01-02 01:11:59 -05002503 /*
2504 * Get the host data width - this assumes that HCON has been set with
2505 * the correct values.
2506 */
2507 i = (mci_readl(host, HCON) >> 7) & 0x7;
2508 if (!i) {
2509 host->push_data = dw_mci_push_data16;
2510 host->pull_data = dw_mci_pull_data16;
2511 width = 16;
2512 host->data_shift = 1;
2513 } else if (i == 2) {
2514 host->push_data = dw_mci_push_data64;
2515 host->pull_data = dw_mci_pull_data64;
2516 width = 64;
2517 host->data_shift = 3;
2518 } else {
2519 /* Check for a reserved value, and warn if it is */
2520 WARN((i != 1),
2521 "HCON reports a reserved host data width!\n"
2522 "Defaulting to 32-bit access.\n");
2523 host->push_data = dw_mci_push_data32;
2524 host->pull_data = dw_mci_pull_data32;
2525 width = 32;
2526 host->data_shift = 2;
2527 }
2528
2529 /* Reset all blocks */
Seungwon Jeon31bff452013-08-31 00:14:23 +09002530 if (!dw_mci_ctrl_all_reset(host))
Seungwon Jeon141a7122012-05-22 13:01:03 +09002531 return -ENODEV;
2532
2533 host->dma_ops = host->pdata->dma_ops;
2534 dw_mci_init_dma(host);
Will Newtonf95f3852011-01-02 01:11:59 -05002535
2536 /* Clear the interrupts for the host controller */
2537 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2538 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2539
2540 /* Put in max timeout */
2541 mci_writel(host, TMOUT, 0xFFFFFFFF);
2542
2543 /*
2544	 * FIFO threshold settings: RxMark = fifo_size / 2 - 1,
2545	 * TxMark = fifo_size / 2, DMA size = 8
2546 */
James Hoganb86d8252011-06-24 13:57:18 +01002547 if (!host->pdata->fifo_depth) {
2548 /*
2549 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2550 * have been overwritten by the bootloader, just like we're
2551 * about to do, so if you know the value for your hardware, you
2552 * should put it in the platform data.
2553 */
2554 fifo_size = mci_readl(host, FIFOTH);
Jaehoon Chung8234e862012-01-11 09:28:21 +00002555 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
James Hoganb86d8252011-06-24 13:57:18 +01002556 } else {
2557 fifo_size = host->pdata->fifo_depth;
2558 }
2559 host->fifo_depth = fifo_size;
Seungwon Jeon52426892013-08-31 00:13:42 +09002560 host->fifoth_val =
2561 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
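	/*
	 * Illustration: with a 32-word deep FIFO this yields
	 * RX_WMark = 15 and TX_WMark = 16, with the 0x2 MSIZE encoding
	 * selecting the DMA size of 8 noted above.
	 */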
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002562 mci_writel(host, FIFOTH, host->fifoth_val);
Will Newtonf95f3852011-01-02 01:11:59 -05002563
2564 /* disable clock to CIU */
2565 mci_writel(host, CLKENA, 0);
2566 mci_writel(host, CLKSRC, 0);
2567
James Hogan63008762013-03-12 10:43:54 +00002568 /*
2569	 * The DATA register offset changed in the 2.40a spec, so check
2570	 * the version ID and set the data offset accordingly.
2571 */
2572 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2573 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2574
2575 if (host->verid < DW_MMC_240A)
2576 host->data_offset = DATA_OFFSET;
2577 else
2578 host->data_offset = DATA_240A_OFFSET;
2579
Will Newtonf95f3852011-01-02 01:11:59 -05002580 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002581 host->card_workqueue = alloc_workqueue("dw-mci-card",
James Hogan1791b13e2011-06-24 13:55:55 +01002582 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
Wei Yongjunef7aef92013-04-19 09:25:45 +08002583 if (!host->card_workqueue) {
2584 ret = -ENOMEM;
James Hogan1791b13e2011-06-24 13:55:55 +01002585 goto err_dmaunmap;
Wei Yongjunef7aef92013-04-19 09:25:45 +08002586 }
James Hogan1791b13e2011-06-24 13:55:55 +01002587 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002588 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2589 host->irq_flags, "dw-mci", host);
Will Newtonf95f3852011-01-02 01:11:59 -05002590 if (ret)
James Hogan1791b13e2011-06-24 13:55:55 +01002591 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002592
Will Newtonf95f3852011-01-02 01:11:59 -05002593 if (host->pdata->num_slots)
2594 host->num_slots = host->pdata->num_slots;
2595 else
2596 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2597
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05302598 /*
2599	 * Enable interrupts for command done, data over, data empty, card detect,
2600	 * receive ready, and errors such as transmit/receive timeout and CRC error
2601 */
2602 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2603 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2604 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2605 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2606 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2607
2608 dev_info(host->dev, "DW MMC controller at irq %d, "
2609 "%d bit host data width, "
2610 "%u deep fifo\n",
2611 host->irq, width, fifo_size);
2612
Will Newtonf95f3852011-01-02 01:11:59 -05002613 /* We need at least one slot to succeed */
2614 for (i = 0; i < host->num_slots; i++) {
2615 ret = dw_mci_init_slot(host, i);
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002616 if (ret)
2617 dev_dbg(host->dev, "slot %d init failed\n", i);
2618 else
2619 init_slots++;
2620 }
2621
2622 if (init_slots) {
2623 dev_info(host->dev, "%d slots initialized\n", init_slots);
2624 } else {
2625 dev_dbg(host->dev, "attempted to initialize %d slots, "
2626 "but failed on all\n", host->num_slots);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002627 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002628 }
2629
Will Newtonf95f3852011-01-02 01:11:59 -05002630 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
Thomas Abraham4a909202012-09-17 18:16:35 +00002631 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002632
2633 return 0;
2634
James Hogan1791b13e2011-06-24 13:55:55 +01002635err_workqueue:
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002636 destroy_workqueue(host->card_workqueue);
James Hogan1791b13e2011-06-24 13:55:55 +01002637
Will Newtonf95f3852011-01-02 01:11:59 -05002638err_dmaunmap:
2639 if (host->use_dma && host->dma_ops->exit)
2640 host->dma_ops->exit(host);
Will Newtonf95f3852011-01-02 01:11:59 -05002641
Doug Anderson870556a2013-06-07 10:28:29 -07002642err_regulator:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002643 if (host->vmmc)
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002644 regulator_disable(host->vmmc);
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002645
2646err_clk_ciu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002647 if (!IS_ERR(host->ciu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002648 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002649
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002650err_clk_biu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002651 if (!IS_ERR(host->biu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002652 clk_disable_unprepare(host->biu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002653
Will Newtonf95f3852011-01-02 01:11:59 -05002654 return ret;
2655}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302656EXPORT_SYMBOL(dw_mci_probe);
Will Newtonf95f3852011-01-02 01:11:59 -05002657
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302658void dw_mci_remove(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002659{
Will Newtonf95f3852011-01-02 01:11:59 -05002660 int i;
2661
2662 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2663 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2664
Will Newtonf95f3852011-01-02 01:11:59 -05002665 for (i = 0; i < host->num_slots; i++) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002666 dev_dbg(host->dev, "remove slot %d\n", i);
Will Newtonf95f3852011-01-02 01:11:59 -05002667 if (host->slot[i])
2668 dw_mci_cleanup_slot(host->slot[i], i);
2669 }
2670
2671 /* disable clock to CIU */
2672 mci_writel(host, CLKENA, 0);
2673 mci_writel(host, CLKSRC, 0);
2674
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002675 destroy_workqueue(host->card_workqueue);
Will Newtonf95f3852011-01-02 01:11:59 -05002676
2677 if (host->use_dma && host->dma_ops->exit)
2678 host->dma_ops->exit(host);
2679
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002680 if (host->vmmc)
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002681 regulator_disable(host->vmmc);
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002682
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002683 if (!IS_ERR(host->ciu_clk))
2684 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002685
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002686 if (!IS_ERR(host->biu_clk))
2687 clk_disable_unprepare(host->biu_clk);
Will Newtonf95f3852011-01-02 01:11:59 -05002688}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302689EXPORT_SYMBOL(dw_mci_remove);
2690
2691
Will Newtonf95f3852011-01-02 01:11:59 -05002692
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002693#ifdef CONFIG_PM_SLEEP
Will Newtonf95f3852011-01-02 01:11:59 -05002694/*
2695 * TODO: we should probably disable the clock to the card in the suspend path.
2696 */
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302697int dw_mci_suspend(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002698{
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002699 if (host->vmmc)
2700 regulator_disable(host->vmmc);
2701
Will Newtonf95f3852011-01-02 01:11:59 -05002702 return 0;
2703}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302704EXPORT_SYMBOL(dw_mci_suspend);
Will Newtonf95f3852011-01-02 01:11:59 -05002705
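/*
 * Resume mirrors the relevant parts of probe: re-enable vmmc, reset the
 * controller, re-initialize DMA, restore FIFOTH and the interrupt mask,
 * and re-program the bus for every slot that kept power during suspend.
 */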
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302706int dw_mci_resume(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002707{
2708 int i, ret;
Will Newtonf95f3852011-01-02 01:11:59 -05002709
Sachin Kamatf2f942c2013-04-04 11:25:10 +05302710 if (host->vmmc) {
2711 ret = regulator_enable(host->vmmc);
2712 if (ret) {
2713 dev_err(host->dev,
2714 "failed to enable regulator: %d\n", ret);
2715 return ret;
2716 }
2717 }
Jaehoon Chung1d6c4e02011-05-11 15:52:39 +09002718
Seungwon Jeon31bff452013-08-31 00:14:23 +09002719 if (!dw_mci_ctrl_all_reset(host)) {
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002720 ret = -ENODEV;
2721 return ret;
2722 }
2723
Jonathan Kliegman3bfe6192012-06-14 13:31:55 -04002724 if (host->use_dma && host->dma_ops->init)
Seungwon Jeon141a7122012-05-22 13:01:03 +09002725 host->dma_ops->init(host);
2726
Seungwon Jeon52426892013-08-31 00:13:42 +09002727 /*
2728	 * Restore the initial value of the FIFOTH register
2729	 * and invalidate prev_blksz by clearing it to zero
2730 */
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002731 mci_writel(host, FIFOTH, host->fifoth_val);
Seungwon Jeon52426892013-08-31 00:13:42 +09002732 host->prev_blksz = 0;
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002733
Doug Anderson2eb29442013-08-31 00:11:49 +09002734 /* Put in max timeout */
2735 mci_writel(host, TMOUT, 0xFFFFFFFF);
2736
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002737 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2738 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2739 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2740 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2741 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2742
Will Newtonf95f3852011-01-02 01:11:59 -05002743 for (i = 0; i < host->num_slots; i++) {
2744 struct dw_mci_slot *slot = host->slot[i];
2745 if (!slot)
2746 continue;
Abhilash Kesavanab269122012-11-19 10:26:21 +05302747 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2748 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2749 dw_mci_setup_bus(slot, true);
2750 }
Will Newtonf95f3852011-01-02 01:11:59 -05002751 }
Will Newtonf95f3852011-01-02 01:11:59 -05002752 return 0;
2753}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302754EXPORT_SYMBOL(dw_mci_resume);
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002755#endif /* CONFIG_PM_SLEEP */
2756
Will Newtonf95f3852011-01-02 01:11:59 -05002757static int __init dw_mci_init(void)
2758{
Sachin Kamat8e1c4e42013-04-04 11:25:11 +05302759	pr_info("Synopsys DesignWare Multimedia Card Interface Driver\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302760 return 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002761}
2762
2763static void __exit dw_mci_exit(void)
2764{
Will Newtonf95f3852011-01-02 01:11:59 -05002765}
2766
2767module_init(dw_mci_init);
2768module_exit(dw_mci_exit);
2769
2770MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2771MODULE_AUTHOR("NXP Semiconductor VietNam");
2772MODULE_AUTHOR("Imagination Technologies Ltd");
2773MODULE_LICENSE("GPL v2");