blob: 1ac227c603b7e13a687b631eff298ce16222707d [file] [log] [blame]
Will Newtonf95f3852011-01-02 01:11:59 -05001/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
Will Newtonf95f3852011-01-02 01:11:59 -050025#include <linux/seq_file.h>
26#include <linux/slab.h>
27#include <linux/stat.h>
28#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/mmc/host.h>
31#include <linux/mmc/mmc.h>
Seungwon Jeon90c21432013-08-31 00:14:05 +090032#include <linux/mmc/sdio.h>
Will Newtonf95f3852011-01-02 01:11:59 -050033#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
Jaehoon Chungc07946a2011-02-25 11:08:14 +090035#include <linux/regulator/consumer.h>
James Hogan1791b13e2011-06-24 13:55:55 +010036#include <linux/workqueue.h>
Thomas Abrahamc91eab42012-09-17 18:16:40 +000037#include <linux/of.h>
Doug Anderson55a6ceb2013-01-11 17:03:53 +000038#include <linux/of_gpio.h>
Zhangfei Gaobf626e52014-01-09 22:35:10 +080039#include <linux/mmc/slot-gpio.h>
Will Newtonf95f3852011-01-02 01:11:59 -050040
41#include "dw_mmc.h"
42
43/* Common flag combinations */
Jaehoon Chung3f7eec62013-05-27 13:47:57 +090044#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
Will Newtonf95f3852011-01-02 01:11:59 -050045 SDMMC_INT_HTO | SDMMC_INT_SBE | \
46 SDMMC_INT_EBE)
47#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
48 SDMMC_INT_RESP_ERR)
49#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
50 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
51#define DW_MCI_SEND_STATUS 1
52#define DW_MCI_RECV_STATUS 2
53#define DW_MCI_DMA_THRESHOLD 16
54
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +090055#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
56#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
57
Will Newtonf95f3852011-01-02 01:11:59 -050058#ifdef CONFIG_MMC_DW_IDMAC
Joonyoung Shimfc79a4d2013-04-26 15:35:22 +090059#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
62 SDMMC_IDMAC_INT_TI)
63
/*
 * Internal DMA controller (IDMAC) hardware descriptor, 32-bit
 * chained-descriptor layout: des3 points at the next descriptor
 * (see dw_mci_idmac_init(), which forward-links a ring of these).
 */
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* disable completion interrupt for this desc */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of the transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of the transfer */
#define IDMAC_DES0_CH	BIT(4)	/* des3 chains to the next descriptor */
#define IDMAC_DES0_ER	BIT(5)	/* end of the descriptor ring */
#define IDMAC_DES0_CES	BIT(30)	/* card error summary (set by hardware) */
#define IDMAC_DES0_OWN	BIT(31)	/* descriptor is owned by the DMAC */

	u32		des1;	/* Buffer sizes */
/* buffer 1 size lives in bits [12:0]; bits [25:13] (buffer 2) are kept */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address (next desc when chained) */
};
82#endif /* CONFIG_MMC_DW_IDMAC */
83
/*
 * Tuning block pattern returned by the card during tuning
 * (CMD19/CMD21), 4-bit bus variant — defined by the SD/eMMC specs.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
Will Newtonf95f3852011-01-02 01:11:59 -050094
/*
 * Tuning block pattern returned by the card during tuning,
 * 8-bit bus variant — defined by the eMMC spec.
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
113
Seungwon Jeon31bff452013-08-31 00:14:23 +0900114static inline bool dw_mci_fifo_reset(struct dw_mci *host);
115static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
116
Will Newtonf95f3852011-01-02 01:11:59 -0500117#if defined(CONFIG_DEBUG_FS)
118static int dw_mci_req_show(struct seq_file *s, void *v)
119{
120 struct dw_mci_slot *slot = s->private;
121 struct mmc_request *mrq;
122 struct mmc_command *cmd;
123 struct mmc_command *stop;
124 struct mmc_data *data;
125
126 /* Make sure we get a consistent snapshot */
127 spin_lock_bh(&slot->host->lock);
128 mrq = slot->mrq;
129
130 if (mrq) {
131 cmd = mrq->cmd;
132 data = mrq->data;
133 stop = mrq->stop;
134
135 if (cmd)
136 seq_printf(s,
137 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
138 cmd->opcode, cmd->arg, cmd->flags,
139 cmd->resp[0], cmd->resp[1], cmd->resp[2],
140 cmd->resp[2], cmd->error);
141 if (data)
142 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
143 data->bytes_xfered, data->blocks,
144 data->blksz, data->flags, data->error);
145 if (stop)
146 seq_printf(s,
147 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148 stop->opcode, stop->arg, stop->flags,
149 stop->resp[0], stop->resp[1], stop->resp[2],
150 stop->resp[2], stop->error);
151 }
152
153 spin_unlock_bh(&slot->host->lock);
154
155 return 0;
156}
157
158static int dw_mci_req_open(struct inode *inode, struct file *file)
159{
160 return single_open(file, dw_mci_req_show, inode->i_private);
161}
162
/* File operations for the per-slot debugfs "req" file. */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
170
171static int dw_mci_regs_show(struct seq_file *s, void *v)
172{
173 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
174 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
175 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
176 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
177 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
178 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
179
180 return 0;
181}
182
183static int dw_mci_regs_open(struct inode *inode, struct file *file)
184{
185 return single_open(file, dw_mci_regs_show, inode->i_private);
186}
187
/* File operations for the per-host debugfs "regs" file. */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
195
/*
 * Create the per-slot debugfs entries under the mmc host's debugfs
 * root: register dump, current request, FSM state and the pending /
 * completed event bitmasks.  Failure is non-fatal — just log it.
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
236#endif /* defined(CONFIG_DEBUG_FS) */
237
/*
 * Build the SDMMC_CMD register value for @cmd: stop/abort handling,
 * response expectation and length, CRC checking, and the data-transfer
 * bits when cmd->data is present.  A controller-specific drv_data hook
 * may adjust the final value.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;
	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	/*
	 * Stop/abort-class commands (CMD12, CMD0, CMD15, and an SDIO
	 * CMD52 write to the CCCR abort register) must not wait for a
	 * previous data transfer to finish.
	 */
	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	/* Let the platform glue apply controller-specific quirks */
	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}
281
/*
 * Pre-build a stop/abort command for a data command, stored in
 * host->stop_abort, so the state machine can abort the transfer on
 * error.  Returns the corresponding SDMMC_CMD register value, or 0
 * when @cmd needs no stop/abort.
 */
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		/* Block I/O is stopped with CMD12 */
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		/*
		 * SDIO CMD53 is aborted with a CMD52 write of the
		 * function number (from cmd->arg bits 30:28) to the
		 * CCCR abort register.
		 */
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}
315
/* Latch @cmd as the active command and fire it off in hardware. */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();	/* CMDARG must be visible before the START bit is set */

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
329
Seungwon Jeon90c21432013-08-31 00:14:05 +0900330static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
Will Newtonf95f3852011-01-02 01:11:59 -0500331{
Seungwon Jeon90c21432013-08-31 00:14:05 +0900332 struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
333 dw_mci_start_command(host, stop, host->stop_cmdr);
Will Newtonf95f3852011-01-02 01:11:59 -0500334}
335
336/* DMA interface functions */
/* Abort any in-flight DMA and mark the data-transfer phase complete. */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
347
Seungwon Jeon9aa51402012-02-06 16:55:07 +0900348static int dw_mci_get_dma_dir(struct mmc_data *data)
349{
350 if (data->flags & MMC_DATA_WRITE)
351 return DMA_TO_DEVICE;
352 else
353 return DMA_FROM_DEVICE;
354}
355
Jaehoon Chung9beee912012-02-16 11:19:38 +0900356#ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the current transfer's scatterlist — but only if it wasn't
 * pre-mapped by dw_mci_pre_req() (host_cookie set), in which case
 * dw_mci_post_req() owns the unmap.
 */
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}
368
Seungwon Jeon5ce9d962013-08-31 00:14:33 +0900369static void dw_mci_idmac_reset(struct dw_mci *host)
370{
371 u32 bmod = mci_readl(host, BMOD);
372 /* Software reset of DMA */
373 bmod |= SDMMC_IDMAC_SWRESET;
374 mci_writel(host, BMOD, bmod);
375}
376
/* Stop the IDMAC: detach it from the controller, then halt and reset it. */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
393
/* IDMAC transfer-done path: unmap buffers and hand off to the tasklet. */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
411
/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist.
 * Every descriptor is handed to hardware (OWN) chained (CH) with its
 * completion interrupt suppressed (DIC); the first and last are then
 * tagged FD/LD, and the last one clears CH|DIC so only the final
 * descriptor raises a completion interrupt.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor (byte arithmetic: sg_cpu is void *) */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	wmb();	/* descriptors must hit memory before the DMAC starts */
}
443
/* Prepare the descriptor ring for this transfer and start the IDMAC. */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	wmb();	/* order the CTRL update before enabling the DMAC */

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
465
/*
 * One-time IDMAC setup: build a circular descriptor ring in the
 * DMA-coherent page at host->sg_cpu, reset the DMAC, clear stale
 * interrupt status, and enable only normal/TX/RX-complete interrupts.
 * Always returns 0.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}
493
/* DMA ops vector for the internal DMA controller (IDMAC). */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
501#endif /* CONFIG_MMC_DW_IDMAC */
502
/*
 * DMA-map @data->sg if the transfer is DMA-capable.
 *
 * Returns the number of mapped segments, or -EINVAL for transfers that
 * must fall back to PIO (shorter than DW_MCI_DMA_THRESHOLD, or with a
 * block size / buffer that isn't 32-bit aligned).  With @next set
 * (pre_req path) the result is cached in data->host_cookie; without it,
 * an existing cookie is returned directly, re-using the prior mapping.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	/* Re-use a mapping already made by dw_mci_pre_req() */
	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
541
/* mmc_host_ops.pre_req: map the next request's buffers ahead of time. */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* A non-zero cookie here is stale state; clear it and bail out */
	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* Mapping failed (PIO fallback expected) — leave no cookie behind */
	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}
560
/* mmc_host_ops.post_req: undo the mapping made by dw_mci_pre_req(). */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* Only unmap if pre_req actually mapped (cookie set) */
	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}
578
/*
 * Pick an IDMAC burst size (MSIZE index) and RX/TX FIFO watermarks that
 * divide the current block size evenly, and program them into FIFOTH.
 * Compiled out (no-op body) without CONFIG_MMC_DW_IDMAC.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	/* Largest burst that divides both the block and the TX headroom */
	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
619
/*
 * Program the card read threshold (CDTHRCTL).  For fast timings
 * (HS200 / SDR104) the threshold is enabled so a read burst only
 * starts when a full block fits in the FIFO; in all other cases,
 * or when a block doesn't fit at all, the feature is disabled.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
650
/*
 * Try to set up @data as a DMA transfer.  Returns 0 on success, or a
 * negative error when DMA can't be used and the caller must fall back
 * to PIO.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
697
/*
 * Hand @data to the controller: prefer DMA, falling back to PIO
 * (sg_miter plus RX/TX-ready interrupts) when DMA isn't possible.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback: iterate the sg list from interrupt context */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		/* Clear stale status, then enable RX/TX-ready interrupts */
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transfered by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
752
/*
 * Issue a register-only command (e.g. a clock update) and busy-wait up
 * to 500ms for the controller to accept it — the START bit self-clears
 * once the CIU has taken the command.  Logs an error on timeout.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();	/* the argument must be visible before the command fires */
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
772
/*
 * Program the card clock and bus width for @slot.
 *
 * clock == 0 simply gates the card clock.  Otherwise the divider is
 * derived from bus_hz (rounding up so the card is never over-clocked)
 * and the CIU is informed, via mci_send_cmd(), around every clock
 * register change.  Low-power clock gating is enabled unless the SDIO
 * interrupt for this slot is unmasked.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		/* Only log when the effective rate actually changes */
		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* keep the clock with reflecting clock dividor */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
836
/*
 * Start @cmd on @slot (caller holds host->lock): reset per-request
 * bookkeeping, program timeout/byte/block counts for data commands,
 * fire the command, and pre-compute the stop/abort command word for
 * the state machine.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		/* Maximum hardware data timeout */
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();	/* data setup must be visible before the command starts */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
881
Seungwon Jeon053b3ce2011-12-22 18:01:29 +0900882static void dw_mci_start_request(struct dw_mci *host,
883 struct dw_mci_slot *slot)
884{
885 struct mmc_request *mrq = slot->mrq;
886 struct mmc_command *cmd;
887
888 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
889 __dw_mci_start_request(host, slot, cmd);
890}
891
/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	/* Start immediately if the controller is idle, else queue the slot */
	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}
908
909static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
910{
911 struct dw_mci_slot *slot = mmc_priv(mmc);
912 struct dw_mci *host = slot->host;
913
914 WARN_ON(slot->mrq);
915
James Hogan7456caa2011-06-24 13:55:10 +0100916 /*
917 * The check for card presence and queueing of the request must be
918 * atomic, otherwise the card could be removed in between and the
919 * request wouldn't fail until another card was inserted.
920 */
921 spin_lock_bh(&host->lock);
922
Will Newtonf95f3852011-01-02 01:11:59 -0500923 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
James Hogan7456caa2011-06-24 13:55:10 +0100924 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -0500925 mrq->cmd->error = -ENOMEDIUM;
926 mmc_request_done(mmc, mrq);
927 return;
928 }
929
Will Newtonf95f3852011-01-02 01:11:59 -0500930 dw_mci_queue_request(host, slot, mrq);
James Hogan7456caa2011-06-24 13:55:10 +0100931
932 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -0500933}
934
/*
 * mmc_host_ops.set_ios: apply bus width, timing, clock and power
 * settings requested by the MMC core to this slot's registers.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;

	/* Record the requested bus width as a CTYPE register encoding. */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set: per-slot DDR bits live in UHS_REG[31:16]. */
	if (ios->timing == MMC_TIMING_MMC_DDR52)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	/* Let SoC-specific glue apply any extra settings for this mode. */
	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		/* Freshly powered card needs the 80-clock init sequence. */
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		/* Drop this slot's power-enable bit. */
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}
992
993static int dw_mci_get_ro(struct mmc_host *mmc)
994{
995 int read_only;
996 struct dw_mci_slot *slot = mmc_priv(mmc);
Jaehoon Chung9795a842014-03-03 11:36:46 +0900997 int gpio_ro = mmc_gpio_get_ro(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -0500998
999 /* Use platform get_ro function, else try on board write protect */
Doug Anderson96406392013-01-11 17:03:54 +00001000 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
Thomas Abrahamb4967aa2012-09-17 18:16:39 +00001001 read_only = 0;
Jaehoon Chung9795a842014-03-03 11:36:46 +09001002 else if (!IS_ERR_VALUE(gpio_ro))
1003 read_only = gpio_ro;
Will Newtonf95f3852011-01-02 01:11:59 -05001004 else
1005 read_only =
1006 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1007
1008 dev_dbg(&mmc->class_dev, "card is %s\n",
1009 read_only ? "read-only" : "read-write");
1010
1011 return read_only;
1012}
1013
1014static int dw_mci_get_cd(struct mmc_host *mmc)
1015{
1016 int present;
1017 struct dw_mci_slot *slot = mmc_priv(mmc);
1018 struct dw_mci_board *brd = slot->host->pdata;
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001019 struct dw_mci *host = slot->host;
1020 int gpio_cd = mmc_gpio_get_cd(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -05001021
1022 /* Use platform get_cd function, else try onboard card detect */
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09001023 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1024 present = 1;
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001025 else if (!IS_ERR_VALUE(gpio_cd))
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001026 present = gpio_cd;
Will Newtonf95f3852011-01-02 01:11:59 -05001027 else
1028 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1029 == 0 ? 1 : 0;
1030
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001031 spin_lock_bh(&host->lock);
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001032 if (present) {
1033 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
Will Newtonf95f3852011-01-02 01:11:59 -05001034 dev_dbg(&mmc->class_dev, "card is present\n");
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001035 } else {
1036 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
Will Newtonf95f3852011-01-02 01:11:59 -05001037 dev_dbg(&mmc->class_dev, "card is not present\n");
Zhangfei Gaobf626e52014-01-09 22:35:10 +08001038 }
Zhangfei Gao7cf347b2014-01-16 20:48:47 +08001039 spin_unlock_bh(&host->lock);
Will Newtonf95f3852011-01-02 01:11:59 -05001040
1041 return present;
1042}
1043
/*
 * Disable lower power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		/*
		 * A clock-update command is required to make the new
		 * CLKENA value take effect in the card clock domain.
		 */
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}
1067
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301068static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1069{
1070 struct dw_mci_slot *slot = mmc_priv(mmc);
1071 struct dw_mci *host = slot->host;
1072 u32 int_mask;
1073
1074 /* Enable/disable Slot Specific SDIO interrupt */
1075 int_mask = mci_readl(host, INTMASK);
1076 if (enb) {
Doug Anderson9623b5b2012-07-25 08:33:17 -07001077 /*
1078 * Turn off low power mode if it was enabled. This is a bit of
1079 * a heavy operation and we disable / enable IRQs a lot, so
1080 * we'll leave low power mode disabled and it will get
1081 * re-enabled again in dw_mci_setup_bus().
1082 */
1083 dw_mci_disable_low_power(slot);
1084
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301085 mci_writel(host, INTMASK,
Kyoungil Kim705ad042012-05-14 17:38:48 +09001086 (int_mask | SDMMC_INT_SDIO(slot->id)));
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301087 } else {
1088 mci_writel(host, INTMASK,
Kyoungil Kim705ad042012-05-14 17:38:48 +09001089 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301090 }
1091}
1092
Seungwon Jeon0976f162013-08-31 00:12:42 +09001093static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1094{
1095 struct dw_mci_slot *slot = mmc_priv(mmc);
1096 struct dw_mci *host = slot->host;
1097 const struct dw_mci_drv_data *drv_data = host->drv_data;
1098 struct dw_mci_tuning_data tuning_data;
1099 int err = -ENOSYS;
1100
1101 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1102 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1103 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1104 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1105 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1106 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1107 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1108 } else {
1109 return -EINVAL;
1110 }
1111 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1112 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1113 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1114 } else {
1115 dev_err(host->dev,
1116 "Undefined command(%d) for tuning\n", opcode);
1117 return -EINVAL;
1118 }
1119
1120 if (drv_data && drv_data->execute_tuning)
1121 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1122 return err;
1123}
1124
/* Host operations registered with the MMC core for each dw_mmc slot. */
static const struct mmc_host_ops dw_mci_ops = {
	.request = dw_mci_request,
	.pre_req = dw_mci_pre_req,
	.post_req = dw_mci_post_req,
	.set_ios = dw_mci_set_ios,
	.get_ro = dw_mci_get_ro,
	.get_cd = dw_mci_get_cd,
	.enable_sdio_irq = dw_mci_enable_sdio_irq,
	.execute_tuning = dw_mci_execute_tuning,
};
1135
/*
 * Finish @mrq and, if other slots are queued, start the next request.
 *
 * Called with host->lock held; temporarily drops it around
 * mmc_request_done() so the core may submit a new request from the
 * completion callback without deadlocking.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		/* Another slot was parked waiting: start it now. */
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	/* Notify the core of completion without holding the lock. */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
1164
Seungwon Jeone352c812013-08-31 00:14:17 +09001165static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
Will Newtonf95f3852011-01-02 01:11:59 -05001166{
1167 u32 status = host->cmd_status;
1168
1169 host->cmd_status = 0;
1170
1171 /* Read the response from the card (up to 16 bytes) */
1172 if (cmd->flags & MMC_RSP_PRESENT) {
1173 if (cmd->flags & MMC_RSP_136) {
1174 cmd->resp[3] = mci_readl(host, RESP0);
1175 cmd->resp[2] = mci_readl(host, RESP1);
1176 cmd->resp[1] = mci_readl(host, RESP2);
1177 cmd->resp[0] = mci_readl(host, RESP3);
1178 } else {
1179 cmd->resp[0] = mci_readl(host, RESP0);
1180 cmd->resp[1] = 0;
1181 cmd->resp[2] = 0;
1182 cmd->resp[3] = 0;
1183 }
1184 }
1185
1186 if (status & SDMMC_INT_RTO)
1187 cmd->error = -ETIMEDOUT;
1188 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1189 cmd->error = -EILSEQ;
1190 else if (status & SDMMC_INT_RESP_ERR)
1191 cmd->error = -EIO;
1192 else
1193 cmd->error = 0;
1194
1195 if (cmd->error) {
1196 /* newer ip versions need a delay between retries */
1197 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1198 mdelay(20);
Will Newtonf95f3852011-01-02 01:11:59 -05001199 }
Seungwon Jeone352c812013-08-31 00:14:17 +09001200
1201 return cmd->error;
1202}
1203
/*
 * Translate the latched data interrupt status into data->error and
 * data->bytes_xfered for a finished data stage. Returns data->error
 * (0 on success). On error the FIFO is reset to discard stale data.
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		/* Error priority: read timeout, then CRC, then end-bit. */
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_fifo_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
1246
/*
 * Request state machine, run in tasklet (softirq) context.
 *
 * Drives the current request through
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP
 * based on events posted by the interrupt handler in
 * host->pending_events. The switch cases deliberately fall through
 * when a state completes within the same invocation.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				/* CMD23 succeeded: now send the data command. */
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/* Data command failed: abort the transfer. */
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_fifo_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			/* Wait for the aborted transfer to drain first. */
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}
1383
James Hogan34b664a2011-06-24 13:57:56 +01001384/* push final bytes to part_buf, only use during push */
1385static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1386{
1387 memcpy((void *)&host->part_buf, buf, cnt);
1388 host->part_buf_count = cnt;
1389}
1390
1391/* append bytes to part_buf, only use during push */
1392static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1393{
1394 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1395 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1396 host->part_buf_count += cnt;
1397 return cnt;
1398}
1399
1400/* pull first bytes from part_buf, only use during pull */
1401static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1402{
1403 cnt = min(cnt, (int)host->part_buf_count);
1404 if (cnt) {
1405 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1406 cnt);
1407 host->part_buf_count -= cnt;
1408 host->part_buf_start += cnt;
1409 }
1410 return cnt;
1411}
1412
1413/* pull final bytes from the part_buf, assuming it's just been filled */
1414static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1415{
1416 memcpy(buf, &host->part_buf, cnt);
1417 host->part_buf_start = cnt;
1418 host->part_buf_count = (1 << host->data_shift) - cnt;
1419}
1420
/*
 * PIO push for a 16-bit FIFO: write @cnt bytes from @buf to the data
 * register, staging any trailing odd byte in part_buf until enough
 * bytes accumulate (or the transfer ends).
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Misaligned source: bounce through an aligned buffer. */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}
1471
/*
 * PIO pull for a 16-bit FIFO: read @cnt bytes from the data register
 * into @buf; a trailing odd byte is staged in part_buf for the next
 * call to consume.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Misaligned destination: bounce through an aligned buffer. */
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Stage the leftover FIFO word; hand @cnt bytes to caller. */
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1503
/*
 * PIO push for a 32-bit FIFO: write @cnt bytes from @buf to the data
 * register, staging up to 3 trailing bytes in part_buf until enough
 * bytes accumulate (or the transfer ends).
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Misaligned source: bounce through an aligned buffer. */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}
1554
/*
 * PIO pull for a 32-bit FIFO: read @cnt bytes from the data register
 * into @buf; up to 3 trailing bytes are staged in part_buf for the
 * next call to consume.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Misaligned destination: bounce through an aligned buffer. */
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Stage the leftover FIFO word; hand @cnt bytes to caller. */
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1586
/*
 * PIO push for a 64-bit FIFO: write @cnt bytes from @buf to the data
 * register, staging up to 7 trailing bytes in part_buf until enough
 * bytes accumulate (or the transfer ends).
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Misaligned source: bounce through an aligned buffer. */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}
1638
/*
 * PIO pull for a 64-bit FIFO: read @cnt bytes from the data register
 * into @buf; up to 7 trailing bytes are staged in part_buf for the
 * next call to consume.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Misaligned destination: bounce through an aligned buffer. */
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Stage the leftover FIFO word; hand @cnt bytes to caller. */
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
1670
1671static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1672{
1673 int len;
1674
1675 /* get remaining partial bytes */
1676 len = dw_mci_pull_part_bytes(host, buf, cnt);
1677 if (unlikely(len == cnt))
1678 return;
1679 buf += len;
1680 cnt -= len;
1681
1682 /* get the rest of the data */
1683 host->pull_data(host, buf, cnt);
Will Newtonf95f3852011-01-02 01:11:59 -05001684}
1685
/*
 * PIO read path: drain the controller FIFO into the request's
 * scatterlist via the sg mapping iterator.
 *
 * @dto: true when handling a data-transfer-over interrupt, in which
 *       case any bytes still counted by the FIFO must be drained too.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* Bytes available = FIFO word count plus staged bytes. */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	/* Whole request consumed: flag transfer completion for the tasklet. */
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1739
/*
 * Feed the request's scatter-gather buffers into the controller TX
 * FIFO (PIO mode).
 *
 * Loops while the TXDR (transmit data request) status stays asserted.
 * On normal exit the sg iterator is left positioned for the next call;
 * once the sg list is exhausted, EVENT_XFER_COMPLETE is signalled.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;	/* log2 of FIFO word size (see probe) */
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free FIFO space in bytes, minus any buffered partial word */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		/* sample MINTSTS before acking TXDR so the loop condition is race-free */
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	/* buffer fully consumed: advance the iterator for the next interrupt */
	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	/* publish the updates above before the completion bit becomes visible */
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1793
/*
 * Record a command-phase interrupt status and kick the state machine.
 * Only the first reported status is kept, so a later interrupt cannot
 * overwrite an earlier (possibly error) status for the same command.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	/* make cmd_status visible before EVENT_CMD_COMPLETE is observed */
	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1804
1805static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1806{
1807 struct dw_mci *host = dev_id;
Seungwon Jeon182c9082012-08-01 09:30:30 +09001808 u32 pending;
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301809 int i;
Will Newtonf95f3852011-01-02 01:11:59 -05001810
Markos Chandras1fb5f682013-03-12 10:53:11 +00001811 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1812
Doug Anderson476d79f2013-07-09 13:04:40 -07001813 /*
1814 * DTO fix - version 2.10a and below, and only if internal DMA
1815 * is configured.
1816 */
1817 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1818 if (!pending &&
1819 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1820 pending |= SDMMC_INT_DATA_OVER;
1821 }
1822
Markos Chandras1fb5f682013-03-12 10:53:11 +00001823 if (pending) {
Will Newtonf95f3852011-01-02 01:11:59 -05001824 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1825 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001826 host->cmd_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001827 smp_wmb();
1828 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
Will Newtonf95f3852011-01-02 01:11:59 -05001829 }
1830
1831 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1832 /* if there is an error report DATA_ERROR */
1833 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001834 host->data_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001835 smp_wmb();
1836 set_bit(EVENT_DATA_ERROR, &host->pending_events);
Seungwon Jeon9b2026a2012-08-01 09:30:40 +09001837 tasklet_schedule(&host->tasklet);
Will Newtonf95f3852011-01-02 01:11:59 -05001838 }
1839
1840 if (pending & SDMMC_INT_DATA_OVER) {
1841 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1842 if (!host->data_status)
Seungwon Jeon182c9082012-08-01 09:30:30 +09001843 host->data_status = pending;
Will Newtonf95f3852011-01-02 01:11:59 -05001844 smp_wmb();
1845 if (host->dir_status == DW_MCI_RECV_STATUS) {
1846 if (host->sg != NULL)
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001847 dw_mci_read_data_pio(host, true);
Will Newtonf95f3852011-01-02 01:11:59 -05001848 }
1849 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1850 tasklet_schedule(&host->tasklet);
1851 }
1852
1853 if (pending & SDMMC_INT_RXDR) {
1854 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
James Hoganb40af3a2011-06-24 13:54:06 +01001855 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
Kyoungil Kim87a74d32013-01-22 16:46:30 +09001856 dw_mci_read_data_pio(host, false);
Will Newtonf95f3852011-01-02 01:11:59 -05001857 }
1858
1859 if (pending & SDMMC_INT_TXDR) {
1860 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
James Hoganb40af3a2011-06-24 13:54:06 +01001861 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
Will Newtonf95f3852011-01-02 01:11:59 -05001862 dw_mci_write_data_pio(host);
1863 }
1864
1865 if (pending & SDMMC_INT_CMD_DONE) {
1866 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
Seungwon Jeon182c9082012-08-01 09:30:30 +09001867 dw_mci_cmd_interrupt(host, pending);
Will Newtonf95f3852011-01-02 01:11:59 -05001868 }
1869
1870 if (pending & SDMMC_INT_CD) {
1871 mci_writel(host, RINTSTS, SDMMC_INT_CD);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07001872 queue_work(host->card_workqueue, &host->card_work);
Will Newtonf95f3852011-01-02 01:11:59 -05001873 }
1874
Shashidhar Hiremath1a5c8e12011-08-29 13:11:46 +05301875 /* Handle SDIO Interrupts */
1876 for (i = 0; i < host->num_slots; i++) {
1877 struct dw_mci_slot *slot = host->slot[i];
1878 if (pending & SDMMC_INT_SDIO(i)) {
1879 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
1880 mmc_signal_sdio_irq(slot->mmc);
1881 }
1882 }
1883
Markos Chandras1fb5f682013-03-12 10:53:11 +00001884 }
Will Newtonf95f3852011-01-02 01:11:59 -05001885
1886#ifdef CONFIG_MMC_DW_IDMAC
1887 /* Handle DMA interrupts */
1888 pending = mci_readl(host, IDSTS);
1889 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1890 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1891 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
Will Newtonf95f3852011-01-02 01:11:59 -05001892 host->dma_ops->complete(host);
1893 }
1894#endif
1895
1896 return IRQ_HANDLED;
1897}
1898
/*
 * Card-detect workqueue handler.
 *
 * For each slot, compare the current card-present state against the
 * last recorded one; on every change, fail any request in flight with
 * -ENOMEDIUM, reset the FIFO (and IDMAC) when the card went away, and
 * finally ask the mmc core to rescan the slot.
 *
 * NOTE(review): assumes host->slot[i] is valid for every index up to
 * num_slots — confirm against the slot-init failure path in probe.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;

		present = dw_mci_get_cd(mmc);
		/* re-check after each pass in case the card state changed again */
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/* request is active on the controller */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* request only queued: fail it directly */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					/* drop the lock: completion may re-enter the driver */
					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				/* Clear down the FIFO */
				dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
				dw_mci_idmac_reset(host);
#endif

			}

			spin_unlock_bh(&host->lock);

			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
1984
Thomas Abrahamc91eab42012-09-17 18:16:40 +00001985#ifdef CONFIG_OF
1986/* given a slot id, find out the device node representing that slot */
1987static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1988{
1989 struct device_node *np;
1990 const __be32 *addr;
1991 int len;
1992
1993 if (!dev || !dev->of_node)
1994 return NULL;
1995
1996 for_each_child_of_node(dev->of_node, np) {
1997 addr = of_get_property(np, "reg", &len);
1998 if (!addr || (len < sizeof(int)))
1999 continue;
2000 if (be32_to_cpup(addr) == slot)
2001 return np;
2002 }
2003 return NULL;
2004}
2005
/* Per-slot DT boolean properties mapped onto DW_MCI_SLOT_QUIRK_* flags. */
static struct dw_mci_of_slot_quirks {
	char *quirk;	/* DT property name */
	int id;		/* corresponding quirk flag */
} of_slot_quirks[] = {
	{
		.quirk = "disable-wp",
		.id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};
2015
2016static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2017{
2018 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2019 int quirks = 0;
2020 int idx;
2021
2022 /* get quirks */
2023 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2024 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2025 quirks |= of_slot_quirks[idx].id;
2026
2027 return quirks;
2028}
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002029#else /* CONFIG_OF */
/* !CONFIG_OF stub: no device tree, so no per-slot quirks. */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
/* !CONFIG_OF stub: no device tree, so there is no slot node to find. */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
2038#endif /* CONFIG_OF */
2039
Jaehoon Chung36c179a2012-08-23 20:31:48 +09002040static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
Will Newtonf95f3852011-01-02 01:11:59 -05002041{
2042 struct mmc_host *mmc;
2043 struct dw_mci_slot *slot;
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002044 const struct dw_mci_drv_data *drv_data = host->drv_data;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002045 int ctrl_id, ret;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002046 u32 freq[2];
Will Newtonf95f3852011-01-02 01:11:59 -05002047
Thomas Abraham4a909202012-09-17 18:16:35 +00002048 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
Will Newtonf95f3852011-01-02 01:11:59 -05002049 if (!mmc)
2050 return -ENOMEM;
2051
2052 slot = mmc_priv(mmc);
2053 slot->id = id;
2054 slot->mmc = mmc;
2055 slot->host = host;
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002056 host->slot[id] = slot;
Will Newtonf95f3852011-01-02 01:11:59 -05002057
Doug Andersona70aaa62013-01-11 17:03:50 +00002058 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2059
Will Newtonf95f3852011-01-02 01:11:59 -05002060 mmc->ops = &dw_mci_ops;
Seungwon Jeon1f44a2a2013-08-31 00:13:31 +09002061 if (of_property_read_u32_array(host->dev->of_node,
2062 "clock-freq-min-max", freq, 2)) {
2063 mmc->f_min = DW_MCI_FREQ_MIN;
2064 mmc->f_max = DW_MCI_FREQ_MAX;
2065 } else {
2066 mmc->f_min = freq[0];
2067 mmc->f_max = freq[1];
2068 }
Will Newtonf95f3852011-01-02 01:11:59 -05002069
Jaehoon Chung907abd52014-03-03 11:36:43 +09002070 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
Will Newtonf95f3852011-01-02 01:11:59 -05002071
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002072 if (host->pdata->caps)
2073 mmc->caps = host->pdata->caps;
Jaehoon Chungfc3d7722011-02-25 11:08:15 +09002074
Abhilash Kesavanab269122012-11-19 10:26:21 +05302075 if (host->pdata->pm_caps)
2076 mmc->pm_caps = host->pdata->pm_caps;
2077
Thomas Abraham800d78b2012-09-17 18:16:42 +00002078 if (host->dev->of_node) {
2079 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2080 if (ctrl_id < 0)
2081 ctrl_id = 0;
2082 } else {
2083 ctrl_id = to_platform_device(host->dev)->id;
2084 }
James Hogancb27a842012-10-16 09:43:08 +01002085 if (drv_data && drv_data->caps)
2086 mmc->caps |= drv_data->caps[ctrl_id];
Thomas Abraham800d78b2012-09-17 18:16:42 +00002087
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002088 if (host->pdata->caps2)
2089 mmc->caps2 = host->pdata->caps2;
Seungwon Jeon4f408cc2011-12-09 14:55:52 +09002090
Jaehoon Chungd8a4fb02014-03-03 11:36:41 +09002091 mmc_of_parse(mmc);
Will Newtonf95f3852011-01-02 01:11:59 -05002092
Will Newtonf95f3852011-01-02 01:11:59 -05002093 if (host->pdata->blk_settings) {
2094 mmc->max_segs = host->pdata->blk_settings->max_segs;
2095 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2096 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2097 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2098 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2099 } else {
2100 /* Useful defaults if platform data is unset. */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002101#ifdef CONFIG_MMC_DW_IDMAC
2102 mmc->max_segs = host->ring_size;
2103 mmc->max_blk_size = 65536;
2104 mmc->max_blk_count = host->ring_size;
2105 mmc->max_seg_size = 0x1000;
2106 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2107#else
Will Newtonf95f3852011-01-02 01:11:59 -05002108 mmc->max_segs = 64;
2109 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2110 mmc->max_blk_count = 512;
2111 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2112 mmc->max_seg_size = mmc->max_req_size;
Will Newtonf95f3852011-01-02 01:11:59 -05002113#endif /* CONFIG_MMC_DW_IDMAC */
Jaehoon Chunga39e5742012-02-04 17:00:27 -05002114 }
Will Newtonf95f3852011-01-02 01:11:59 -05002115
Jaehoon Chungae0eb342014-03-03 11:36:48 +09002116 if (dw_mci_get_cd(mmc))
2117 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2118 else
2119 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2120
Jaehoon Chung0cea5292013-02-15 23:45:45 +09002121 ret = mmc_add_host(mmc);
2122 if (ret)
2123 goto err_setup_bus;
Will Newtonf95f3852011-01-02 01:11:59 -05002124
2125#if defined(CONFIG_DEBUG_FS)
2126 dw_mci_init_debugfs(slot);
2127#endif
2128
2129 /* Card initially undetected */
2130 slot->last_detect_state = 0;
2131
Will Newtonf95f3852011-01-02 01:11:59 -05002132 return 0;
Thomas Abraham800d78b2012-09-17 18:16:42 +00002133
2134err_setup_bus:
2135 mmc_free_host(mmc);
2136 return -EINVAL;
Will Newtonf95f3852011-01-02 01:11:59 -05002137}
2138
2139static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2140{
Will Newtonf95f3852011-01-02 01:11:59 -05002141 /* Debugfs stuff is cleaned up by mmc core */
2142 mmc_remove_host(slot->mmc);
2143 slot->host->slot[id] = NULL;
2144 mmc_free_host(slot->mmc);
2145}
2146
2147static void dw_mci_init_dma(struct dw_mci *host)
2148{
2149 /* Alloc memory for sg translation */
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002150 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
Will Newtonf95f3852011-01-02 01:11:59 -05002151 &host->sg_dma, GFP_KERNEL);
2152 if (!host->sg_cpu) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002153 dev_err(host->dev, "%s: could not alloc DMA memory\n",
Will Newtonf95f3852011-01-02 01:11:59 -05002154 __func__);
2155 goto no_dma;
2156 }
2157
2158 /* Determine which DMA interface to use */
2159#ifdef CONFIG_MMC_DW_IDMAC
2160 host->dma_ops = &dw_mci_idmac_ops;
Seungwon Jeon00956ea2012-09-28 19:13:11 +09002161 dev_info(host->dev, "Using internal DMA controller.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002162#endif
2163
2164 if (!host->dma_ops)
2165 goto no_dma;
2166
Jaehoon Chunge1631f92012-04-18 15:42:31 +09002167 if (host->dma_ops->init && host->dma_ops->start &&
2168 host->dma_ops->stop && host->dma_ops->cleanup) {
Will Newtonf95f3852011-01-02 01:11:59 -05002169 if (host->dma_ops->init(host)) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002170 dev_err(host->dev, "%s: Unable to initialize "
Will Newtonf95f3852011-01-02 01:11:59 -05002171 "DMA Controller.\n", __func__);
2172 goto no_dma;
2173 }
2174 } else {
Thomas Abraham4a909202012-09-17 18:16:35 +00002175 dev_err(host->dev, "DMA initialization not found.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002176 goto no_dma;
2177 }
2178
2179 host->use_dma = 1;
2180 return;
2181
2182no_dma:
Thomas Abraham4a909202012-09-17 18:16:35 +00002183 dev_info(host->dev, "Using PIO mode.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002184 host->use_dma = 0;
2185 return;
2186}
2187
Seungwon Jeon31bff452013-08-31 00:14:23 +09002188static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
Will Newtonf95f3852011-01-02 01:11:59 -05002189{
2190 unsigned long timeout = jiffies + msecs_to_jiffies(500);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002191 u32 ctrl;
Will Newtonf95f3852011-01-02 01:11:59 -05002192
Seungwon Jeon31bff452013-08-31 00:14:23 +09002193 ctrl = mci_readl(host, CTRL);
2194 ctrl |= reset;
2195 mci_writel(host, CTRL, ctrl);
Will Newtonf95f3852011-01-02 01:11:59 -05002196
2197 /* wait till resets clear */
2198 do {
2199 ctrl = mci_readl(host, CTRL);
Seungwon Jeon31bff452013-08-31 00:14:23 +09002200 if (!(ctrl & reset))
Will Newtonf95f3852011-01-02 01:11:59 -05002201 return true;
2202 } while (time_before(jiffies, timeout));
2203
Seungwon Jeon31bff452013-08-31 00:14:23 +09002204 dev_err(host->dev,
2205 "Timeout resetting block (ctrl reset %#x)\n",
2206 ctrl & reset);
Will Newtonf95f3852011-01-02 01:11:59 -05002207
2208 return false;
2209}
2210
Seungwon Jeon31bff452013-08-31 00:14:23 +09002211static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2212{
2213 /*
2214 * Reseting generates a block interrupt, hence setting
2215 * the scatter-gather pointer to NULL.
2216 */
2217 if (host->sg) {
2218 sg_miter_stop(&host->sg_miter);
2219 host->sg = NULL;
2220 }
2221
2222 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
2223}
2224
2225static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2226{
2227 return dw_mci_ctrl_reset(host,
2228 SDMMC_CTRL_FIFO_RESET |
2229 SDMMC_CTRL_RESET |
2230 SDMMC_CTRL_DMA_RESET);
2231}
2232
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002233#ifdef CONFIG_OF
/* Host-level DT boolean properties mapped onto DW_MCI_QUIRK_* flags. */
static struct dw_mci_of_quirks {
	char *quirk;	/* DT property name */
	int id;		/* corresponding quirk flag */
} of_quirks[] = {
	{
		.quirk = "broken-cd",
		.id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};
2243
/*
 * Build a struct dw_mci_board from the host's device-tree node.
 *
 * Reads num-slots, the of_quirks flags, fifo-depth, card-detect-delay
 * and clock-frequency, then lets the variant driver parse its own
 * properties via drv_data->parse_dt().
 *
 * Returns a devm-allocated pdata on success, or an ERR_PTR() on
 * allocation failure or a variant parse error.
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				&pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, "
				"assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	/* fifo-depth == 0 later means "trust the FIFOTH power-on value" */
	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using "
				"value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* variant-specific DT properties (e.g. SoC timing values) */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	return pdata;
}
2292
2293#else /* CONFIG_OF */
/* !CONFIG_OF stub: platform data must come from board code instead. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
2298#endif /* CONFIG_OF */
2299
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302300int dw_mci_probe(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002301{
Arnd Bergmanne95baf12012-11-08 14:26:11 +00002302 const struct dw_mci_drv_data *drv_data = host->drv_data;
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302303 int width, i, ret = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002304 u32 fifo_size;
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002305 int init_slots = 0;
Will Newtonf95f3852011-01-02 01:11:59 -05002306
Thomas Abrahamc91eab42012-09-17 18:16:40 +00002307 if (!host->pdata) {
2308 host->pdata = dw_mci_parse_dt(host);
2309 if (IS_ERR(host->pdata)) {
2310 dev_err(host->dev, "platform data not available\n");
2311 return -EINVAL;
2312 }
Will Newtonf95f3852011-01-02 01:11:59 -05002313 }
2314
Jaehoon Chung907abd52014-03-03 11:36:43 +09002315 if (host->pdata->num_slots > 1) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002316 dev_err(host->dev,
Jaehoon Chung907abd52014-03-03 11:36:43 +09002317 "Platform data must supply num_slots.\n");
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302318 return -ENODEV;
Will Newtonf95f3852011-01-02 01:11:59 -05002319 }
2320
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002321 host->biu_clk = devm_clk_get(host->dev, "biu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002322 if (IS_ERR(host->biu_clk)) {
2323 dev_dbg(host->dev, "biu clock not available\n");
2324 } else {
2325 ret = clk_prepare_enable(host->biu_clk);
2326 if (ret) {
2327 dev_err(host->dev, "failed to enable biu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002328 return ret;
2329 }
Will Newtonf95f3852011-01-02 01:11:59 -05002330 }
2331
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002332 host->ciu_clk = devm_clk_get(host->dev, "ciu");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002333 if (IS_ERR(host->ciu_clk)) {
2334 dev_dbg(host->dev, "ciu clock not available\n");
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002335 host->bus_hz = host->pdata->bus_hz;
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002336 } else {
2337 ret = clk_prepare_enable(host->ciu_clk);
2338 if (ret) {
2339 dev_err(host->dev, "failed to enable ciu clock\n");
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002340 goto err_clk_biu;
2341 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002342
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002343 if (host->pdata->bus_hz) {
2344 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2345 if (ret)
2346 dev_warn(host->dev,
Jaehoon Chung612de4c2014-03-03 11:36:42 +09002347 "Unable to set bus rate to %uHz\n",
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002348 host->pdata->bus_hz);
2349 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002350 host->bus_hz = clk_get_rate(host->ciu_clk);
Doug Anderson3c6d89e2013-06-07 10:28:30 -07002351 }
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002352
Jaehoon Chung612de4c2014-03-03 11:36:42 +09002353 if (!host->bus_hz) {
2354 dev_err(host->dev,
2355 "Platform data must supply bus speed\n");
2356 ret = -ENODEV;
2357 goto err_clk_ciu;
2358 }
2359
Yuvaraj Kumar C D002f0d52013-08-31 00:12:19 +09002360 if (drv_data && drv_data->init) {
2361 ret = drv_data->init(host);
2362 if (ret) {
2363 dev_err(host->dev,
2364 "implementation specific init failed\n");
2365 goto err_clk_ciu;
2366 }
2367 }
2368
James Hogancb27a842012-10-16 09:43:08 +01002369 if (drv_data && drv_data->setup_clock) {
2370 ret = drv_data->setup_clock(host);
Thomas Abraham800d78b2012-09-17 18:16:42 +00002371 if (ret) {
2372 dev_err(host->dev,
2373 "implementation specific clock setup failed\n");
2374 goto err_clk_ciu;
2375 }
2376 }
2377
Mark Browna55d6ff2013-07-29 21:55:27 +01002378 host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
Doug Anderson870556a2013-06-07 10:28:29 -07002379 if (IS_ERR(host->vmmc)) {
2380 ret = PTR_ERR(host->vmmc);
2381 if (ret == -EPROBE_DEFER)
2382 goto err_clk_ciu;
2383
2384 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2385 host->vmmc = NULL;
2386 } else {
2387 ret = regulator_enable(host->vmmc);
2388 if (ret) {
2389 if (ret != -EPROBE_DEFER)
2390 dev_err(host->dev,
2391 "regulator_enable fail: %d\n", ret);
2392 goto err_clk_ciu;
2393 }
2394 }
2395
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302396 host->quirks = host->pdata->quirks;
Will Newtonf95f3852011-01-02 01:11:59 -05002397
2398 spin_lock_init(&host->lock);
2399 INIT_LIST_HEAD(&host->queue);
2400
Will Newtonf95f3852011-01-02 01:11:59 -05002401 /*
2402 * Get the host data width - this assumes that HCON has been set with
2403 * the correct values.
2404 */
2405 i = (mci_readl(host, HCON) >> 7) & 0x7;
2406 if (!i) {
2407 host->push_data = dw_mci_push_data16;
2408 host->pull_data = dw_mci_pull_data16;
2409 width = 16;
2410 host->data_shift = 1;
2411 } else if (i == 2) {
2412 host->push_data = dw_mci_push_data64;
2413 host->pull_data = dw_mci_pull_data64;
2414 width = 64;
2415 host->data_shift = 3;
2416 } else {
2417 /* Check for a reserved value, and warn if it is */
2418 WARN((i != 1),
2419 "HCON reports a reserved host data width!\n"
2420 "Defaulting to 32-bit access.\n");
2421 host->push_data = dw_mci_push_data32;
2422 host->pull_data = dw_mci_pull_data32;
2423 width = 32;
2424 host->data_shift = 2;
2425 }
2426
2427 /* Reset all blocks */
Seungwon Jeon31bff452013-08-31 00:14:23 +09002428 if (!dw_mci_ctrl_all_reset(host))
Seungwon Jeon141a7122012-05-22 13:01:03 +09002429 return -ENODEV;
2430
2431 host->dma_ops = host->pdata->dma_ops;
2432 dw_mci_init_dma(host);
Will Newtonf95f3852011-01-02 01:11:59 -05002433
2434 /* Clear the interrupts for the host controller */
2435 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2436 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2437
2438 /* Put in max timeout */
2439 mci_writel(host, TMOUT, 0xFFFFFFFF);
2440
2441 /*
2442 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
2443 * Tx Mark = fifo_size / 2 DMA Size = 8
2444 */
James Hoganb86d8252011-06-24 13:57:18 +01002445 if (!host->pdata->fifo_depth) {
2446 /*
2447 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2448 * have been overwritten by the bootloader, just like we're
2449 * about to do, so if you know the value for your hardware, you
2450 * should put it in the platform data.
2451 */
2452 fifo_size = mci_readl(host, FIFOTH);
Jaehoon Chung8234e862012-01-11 09:28:21 +00002453 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
James Hoganb86d8252011-06-24 13:57:18 +01002454 } else {
2455 fifo_size = host->pdata->fifo_depth;
2456 }
2457 host->fifo_depth = fifo_size;
Seungwon Jeon524268992013-08-31 00:13:42 +09002458 host->fifoth_val =
2459 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002460 mci_writel(host, FIFOTH, host->fifoth_val);
Will Newtonf95f3852011-01-02 01:11:59 -05002461
2462 /* disable clock to CIU */
2463 mci_writel(host, CLKENA, 0);
2464 mci_writel(host, CLKSRC, 0);
2465
James Hogan63008762013-03-12 10:43:54 +00002466 /*
2467 * In 2.40a spec, Data offset is changed.
2468 * Need to check the version-id and set data-offset for DATA register.
2469 */
2470 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2471 dev_info(host->dev, "Version ID is %04x\n", host->verid);
2472
2473 if (host->verid < DW_MMC_240A)
2474 host->data_offset = DATA_OFFSET;
2475 else
2476 host->data_offset = DATA_240A_OFFSET;
2477
Will Newtonf95f3852011-01-02 01:11:59 -05002478 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002479 host->card_workqueue = alloc_workqueue("dw-mci-card",
ZhangZhen59ff3eb2014-03-27 09:41:47 +08002480 WQ_MEM_RECLAIM, 1);
Wei Yongjunef7aef92013-04-19 09:25:45 +08002481 if (!host->card_workqueue) {
2482 ret = -ENOMEM;
James Hogan1791b13e2011-06-24 13:55:55 +01002483 goto err_dmaunmap;
Wei Yongjunef7aef92013-04-19 09:25:45 +08002484 }
James Hogan1791b13e2011-06-24 13:55:55 +01002485 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002486 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2487 host->irq_flags, "dw-mci", host);
Will Newtonf95f3852011-01-02 01:11:59 -05002488 if (ret)
James Hogan1791b13e2011-06-24 13:55:55 +01002489 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002490
Will Newtonf95f3852011-01-02 01:11:59 -05002491 if (host->pdata->num_slots)
2492 host->num_slots = host->pdata->num_slots;
2493 else
2494 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2495
Yuvaraj CD2da1d7f2012-10-08 14:29:51 +05302496 /*
2497 * Enable interrupts for command done, data over, data empty, card det,
2498 * receive ready and error such as transmit, receive timeout, crc error
2499 */
2500 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2501 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2502 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2503 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2504 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2505
2506 dev_info(host->dev, "DW MMC controller at irq %d, "
2507 "%d bit host data width, "
2508 "%u deep fifo\n",
2509 host->irq, width, fifo_size);
2510
Will Newtonf95f3852011-01-02 01:11:59 -05002511 /* We need at least one slot to succeed */
2512 for (i = 0; i < host->num_slots; i++) {
2513 ret = dw_mci_init_slot(host, i);
Thomas Abraham1c2215b2012-09-17 18:16:37 +00002514 if (ret)
2515 dev_dbg(host->dev, "slot %d init failed\n", i);
2516 else
2517 init_slots++;
2518 }
2519
2520 if (init_slots) {
2521 dev_info(host->dev, "%d slots initialized\n", init_slots);
2522 } else {
2523 dev_dbg(host->dev, "attempted to initialize %d slots, "
2524 "but failed on all\n", host->num_slots);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002525 goto err_workqueue;
Will Newtonf95f3852011-01-02 01:11:59 -05002526 }
2527
Will Newtonf95f3852011-01-02 01:11:59 -05002528 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
Thomas Abraham4a909202012-09-17 18:16:35 +00002529 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
Will Newtonf95f3852011-01-02 01:11:59 -05002530
2531 return 0;
2532
James Hogan1791b13e2011-06-24 13:55:55 +01002533err_workqueue:
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002534 destroy_workqueue(host->card_workqueue);
James Hogan1791b13e2011-06-24 13:55:55 +01002535
Will Newtonf95f3852011-01-02 01:11:59 -05002536err_dmaunmap:
2537 if (host->use_dma && host->dma_ops->exit)
2538 host->dma_ops->exit(host);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002539 if (host->vmmc)
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002540 regulator_disable(host->vmmc);
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002541
2542err_clk_ciu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002543 if (!IS_ERR(host->ciu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002544 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002545
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002546err_clk_biu:
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002547 if (!IS_ERR(host->biu_clk))
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002548 clk_disable_unprepare(host->biu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002549
Will Newtonf95f3852011-01-02 01:11:59 -05002550 return ret;
2551}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302552EXPORT_SYMBOL(dw_mci_probe);
Will Newtonf95f3852011-01-02 01:11:59 -05002553
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302554void dw_mci_remove(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002555{
Will Newtonf95f3852011-01-02 01:11:59 -05002556 int i;
2557
2558 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2559 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2560
Will Newtonf95f3852011-01-02 01:11:59 -05002561 for (i = 0; i < host->num_slots; i++) {
Thomas Abraham4a909202012-09-17 18:16:35 +00002562 dev_dbg(host->dev, "remove slot %d\n", i);
Will Newtonf95f3852011-01-02 01:11:59 -05002563 if (host->slot[i])
2564 dw_mci_cleanup_slot(host->slot[i], i);
2565 }
2566
2567 /* disable clock to CIU */
2568 mci_writel(host, CLKENA, 0);
2569 mci_writel(host, CLKSRC, 0);
2570
Thomas Abraham95dcc2c2012-05-01 14:57:36 -07002571 destroy_workqueue(host->card_workqueue);
Will Newtonf95f3852011-01-02 01:11:59 -05002572
2573 if (host->use_dma && host->dma_ops->exit)
2574 host->dma_ops->exit(host);
2575
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002576 if (host->vmmc)
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002577 regulator_disable(host->vmmc);
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002578
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002579 if (!IS_ERR(host->ciu_clk))
2580 clk_disable_unprepare(host->ciu_clk);
Seungwon Jeon780f22a2012-11-28 19:26:03 +09002581
Thomas Abrahamf90a0612012-09-17 18:16:38 +00002582 if (!IS_ERR(host->biu_clk))
2583 clk_disable_unprepare(host->biu_clk);
Will Newtonf95f3852011-01-02 01:11:59 -05002584}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302585EXPORT_SYMBOL(dw_mci_remove);
2586
2587
Will Newtonf95f3852011-01-02 01:11:59 -05002588
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002589#ifdef CONFIG_PM_SLEEP
Will Newtonf95f3852011-01-02 01:11:59 -05002590/*
2591 * TODO: we should probably disable the clock to the card in the suspend path.
2592 */
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302593int dw_mci_suspend(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002594{
Jaehoon Chungc07946a2011-02-25 11:08:14 +09002595 if (host->vmmc)
2596 regulator_disable(host->vmmc);
2597
Will Newtonf95f3852011-01-02 01:11:59 -05002598 return 0;
2599}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302600EXPORT_SYMBOL(dw_mci_suspend);
Will Newtonf95f3852011-01-02 01:11:59 -05002601
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302602int dw_mci_resume(struct dw_mci *host)
Will Newtonf95f3852011-01-02 01:11:59 -05002603{
2604 int i, ret;
Will Newtonf95f3852011-01-02 01:11:59 -05002605
Sachin Kamatf2f942c2013-04-04 11:25:10 +05302606 if (host->vmmc) {
2607 ret = regulator_enable(host->vmmc);
2608 if (ret) {
2609 dev_err(host->dev,
2610 "failed to enable regulator: %d\n", ret);
2611 return ret;
2612 }
2613 }
Jaehoon Chung1d6c4e02011-05-11 15:52:39 +09002614
Seungwon Jeon31bff452013-08-31 00:14:23 +09002615 if (!dw_mci_ctrl_all_reset(host)) {
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002616 ret = -ENODEV;
2617 return ret;
2618 }
2619
Jonathan Kliegman3bfe6192012-06-14 13:31:55 -04002620 if (host->use_dma && host->dma_ops->init)
Seungwon Jeon141a7122012-05-22 13:01:03 +09002621 host->dma_ops->init(host);
2622
Seungwon Jeon524268992013-08-31 00:13:42 +09002623 /*
2624 * Restore the initial value at FIFOTH register
2625 * And Invalidate the prev_blksz with zero
2626 */
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002627 mci_writel(host, FIFOTH, host->fifoth_val);
Seungwon Jeon524268992013-08-31 00:13:42 +09002628 host->prev_blksz = 0;
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002629
Doug Anderson2eb29442013-08-31 00:11:49 +09002630 /* Put in max timeout */
2631 mci_writel(host, TMOUT, 0xFFFFFFFF);
2632
Jaehoon Chunge61cf112011-03-17 20:32:33 +09002633 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2634 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2635 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2636 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2637 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
2638
Will Newtonf95f3852011-01-02 01:11:59 -05002639 for (i = 0; i < host->num_slots; i++) {
2640 struct dw_mci_slot *slot = host->slot[i];
2641 if (!slot)
2642 continue;
Abhilash Kesavanab269122012-11-19 10:26:21 +05302643 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2644 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2645 dw_mci_setup_bus(slot, true);
2646 }
Will Newtonf95f3852011-01-02 01:11:59 -05002647 }
Will Newtonf95f3852011-01-02 01:11:59 -05002648 return 0;
2649}
Shashidhar Hiremath62ca8032012-01-13 16:04:57 +05302650EXPORT_SYMBOL(dw_mci_resume);
Jaehoon Chung6fe88902011-12-08 19:23:03 +09002651#endif /* CONFIG_PM_SLEEP */
2652
/*
 * Module init: just announces the core driver. The actual devices are
 * registered by the platform/PCI glue drivers that link against this core.
 */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}
2658
/*
 * Module exit: intentionally empty — all per-device teardown happens in
 * dw_mci_remove(), called by the glue drivers' remove paths.
 */
static void __exit dw_mci_exit(void)
{
}
2662
2663module_init(dw_mci_init);
2664module_exit(dw_mci_exit);
2665
2666MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
2667MODULE_AUTHOR("NXP Semiconductor VietNam");
2668MODULE_AUTHOR("Imagination Technologies Ltd");
2669MODULE_LICENSE("GPL v2");