blob: 442f5766ffca7dd57b694ae526d4e5f37b98b1c2 [file] [log] [blame]
/*
 *  WM8505/WM8650 SD/MMC Host Controller
 *
 *  Copyright (C) 2010 Tony Prisk
 *  Copyright (C) 2008 WonderMedia Technologies, Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation
 */
11
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/ioport.h>
16#include <linux/errno.h>
17#include <linux/dma-mapping.h>
18#include <linux/delay.h>
19#include <linux/io.h>
20#include <linux/irq.h>
21#include <linux/clk.h>
22#include <linux/gpio.h>
23
24#include <linux/of.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27#include <linux/of_device.h>
28
29#include <linux/mmc/host.h>
30#include <linux/mmc/mmc.h>
31#include <linux/mmc/sd.h>
32
33#include <asm/byteorder.h>
34
35
#define DRIVER_NAME "wmt-sdhc"


/*
 * MMC/SD controller registers (byte offsets from the mapped base).
 * Registers are a mix of 8-, 16- and 32-bit wide; accesses in this
 * driver match the register width (readb/readw/readl).
 */
#define SDMMC_CTLR			0x00
#define SDMMC_CMD			0x01
#define SDMMC_RSPTYPE			0x02
#define SDMMC_ARG			0x04
#define SDMMC_BUSMODE			0x08
#define SDMMC_BLKLEN			0x0C
#define SDMMC_BLKCNT			0x0E
#define SDMMC_RSP			0x10	/* 16-byte response FIFO */
#define SDMMC_CBCR			0x20
#define SDMMC_INTMASK0			0x24
#define SDMMC_INTMASK1			0x25
#define SDMMC_STS0			0x28
#define SDMMC_STS1			0x29
#define SDMMC_STS2			0x2A
#define SDMMC_STS3			0x2B
#define SDMMC_RSPTIMEOUT		0x2C
#define SDMMC_CLK			0x30	/* VT8500 only */
#define SDMMC_EXTCTRL			0x34
#define SDMMC_SBLKLEN			0x38
#define SDMMC_DMATIMEOUT		0x3C


/* SDMMC_CTLR bit fields */
#define CTLR_CMD_START			0x01
#define CTLR_CMD_WRITE			0x04
#define CTLR_FIFO_RESET			0x08

/* SDMMC_BUSMODE bit fields */
#define BM_SPI_MODE			0x01
#define BM_FOURBIT_MODE			0x02
#define BM_EIGHTBIT_MODE		0x04
#define BM_SD_OFF			0x10
#define BM_SPI_CS			0x20
#define BM_SD_POWER			0x40
#define BM_SOFT_RESET			0x80
#define BM_ONEBIT_MASK			0xFD	/* mask clearing BM_FOURBIT_MODE */

/* SDMMC_BLKLEN bit fields */
#define BLKL_CRCERR_ABORT		0x0800
#define BLKL_CD_POL_HIGH		0x1000
#define BLKL_GPI_CD			0x2000
#define BLKL_DATA3_CD			0x4000
#define BLKL_INT_ENABLE			0x8000

/* SDMMC_INTMASK0 bit fields */
#define INT0_MBLK_TRAN_DONE_INT_EN	0x10
#define INT0_BLK_TRAN_DONE_INT_EN	0x20
#define INT0_CD_INT_EN			0x40
#define INT0_DI_INT_EN			0x80

/* SDMMC_INTMASK1 bit fields */
#define INT1_CMD_RES_TRAN_DONE_INT_EN	0x02
#define INT1_CMD_RES_TOUT_INT_EN	0x04
#define INT1_MBLK_AUTO_STOP_INT_EN	0x08
#define INT1_DATA_TOUT_INT_EN		0x10
#define INT1_RESCRC_ERR_INT_EN		0x20
#define INT1_RCRC_ERR_INT_EN		0x40
#define INT1_WCRC_ERR_INT_EN		0x80

/* SDMMC_STS0 bit fields */
#define STS0_WRITE_PROTECT		0x02
#define STS0_CD_DATA3			0x04
#define STS0_CD_GPI			0x08
#define STS0_MBLK_DONE			0x10
#define STS0_BLK_DONE			0x20
#define STS0_CARD_DETECT		0x40
#define STS0_DEVICE_INS			0x80

/* SDMMC_STS1 bit fields */
#define STS1_SDIO_INT			0x01
#define STS1_CMDRSP_DONE		0x02
#define STS1_RSP_TIMEOUT		0x04
#define STS1_AUTOSTOP_DONE		0x08
#define STS1_DATA_TIMEOUT		0x10
#define STS1_RSP_CRC_ERR		0x20
#define STS1_RCRC_ERR			0x40
#define STS1_WCRC_ERR			0x80

/* SDMMC_STS2 bit fields */
#define STS2_CMD_RES_BUSY		0x10
#define STS2_DATARSP_BUSY		0x20
#define STS2_DIS_FORCECLK		0x80


/* MMC/SD DMA Controller Registers */
#define SDDMA_GCR			0x100
#define SDDMA_IER			0x104
#define SDDMA_ISR			0x108
#define SDDMA_DESPR			0x10C
#define SDDMA_RBR			0x110
#define SDDMA_DAR			0x114
#define SDDMA_BAR			0x118
#define SDDMA_CPR			0x11C
#define SDDMA_CCR			0x120


/* SDDMA_GCR bit fields */
#define DMA_GCR_DMA_EN			0x00000001
#define DMA_GCR_SOFT_RESET		0x00000100

/* SDDMA_IER bit fields */
#define DMA_IER_INT_EN			0x00000001

/* SDDMA_ISR bit fields */
#define DMA_ISR_INT_STS			0x00000001

/* SDDMA_RBR bit fields (also used in descriptor 'flags' words) */
#define DMA_RBR_FORMAT			0x40000000
#define DMA_RBR_END			0x80000000

/* SDDMA_CCR bit fields */
#define DMA_CCR_RUN			0x00000080
#define DMA_CCR_IF_TO_PERIPHERAL	0x00000000
#define DMA_CCR_PERIPHERAL_TO_IF	0x00400000

/* SDDMA_CCR event status (reported in the low nibble of SDDMA_CCR) */
#define DMA_CCR_EVT_NO_STATUS		0x00000000
#define DMA_CCR_EVT_UNDERRUN		0x00000001
#define DMA_CCR_EVT_OVERRUN		0x00000002
#define DMA_CCR_EVT_DESP_READ		0x00000003
#define DMA_CCR_EVT_DATA_RW		0x00000004
#define DMA_CCR_EVT_EARLY_END		0x00000005
#define DMA_CCR_EVT_SUCCESS		0x0000000F

/* transfer direction, passed to wmt_dma_config() */
#define PDMA_READ			0x00
#define PDMA_WRITE			0x01

/* 'enable' argument for wmt_set_sd_power() */
#define WMT_SD_POWER_OFF		0
#define WMT_SD_POWER_ON			1
169
/*
 * One 16-byte PDMA descriptor, as walked by the controller's DMA
 * engine starting from the address written to SDDMA_DESPR.
 */
struct wmt_dma_descriptor {
	u32 flags;		/* format/end flags | byte count (see
				 * wmt_dma_init_descriptor()) */
	u32 data_buffer_addr;	/* bus address of the data buffer */
	u32 branch_addr;	/* bus address of the next descriptor */
	u32 reserved1;
};
176
/* Per-SoC controller capabilities, selected via the OF match data. */
struct wmt_mci_caps {
	unsigned int f_min;	/* minimum bus clock (Hz) */
	unsigned int f_max;	/* maximum bus clock (Hz) */
	u32 ocr_avail;		/* supported voltages (MMC_VDD_* mask) */
	u32 caps;		/* MMC_CAP_* host capabilities */
	u32 max_seg_size;
	u32 max_segs;
	u32 max_blk_size;
};
186
/* Per-host driver state, stored in mmc_priv(mmc). */
struct wmt_mci_priv {
	struct mmc_host *mmc;
	void __iomem *sdmmc_base;	/* mapped controller registers */

	int irq_regular;	/* command/card-detect interrupt */
	int irq_dma;		/* PDMA completion interrupt */

	/* coherent buffer holding the DMA descriptor chain */
	void *dma_desc_buffer;
	dma_addr_t dma_desc_device_addr;

	struct completion cmdcomp;
	struct completion datacomp;

	/* in-flight completions; NULL when nothing is outstanding */
	struct completion *comp_cmd;
	struct completion *comp_dma;

	struct mmc_request *req;	/* request currently in flight */
	struct mmc_command *cmd;	/* command whose response is pending
					 * (switched to the stop command for
					 * multi-block transfers) */

	struct clk *clk_sdmmc;
	struct device *dev;

	u8 power_inverted;	/* set when DT has "sdon-inverted" */
	u8 cd_inverted;		/* set when DT has "cd-inverted" */
};
212
213static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
214{
215 u32 reg_tmp;
216 if (enable) {
217 if (priv->power_inverted) {
218 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
219 writeb(reg_tmp | BM_SD_OFF,
220 priv->sdmmc_base + SDMMC_BUSMODE);
221 } else {
222 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
223 writeb(reg_tmp & (~BM_SD_OFF),
224 priv->sdmmc_base + SDMMC_BUSMODE);
225 }
226 } else {
227 if (priv->power_inverted) {
228 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
229 writeb(reg_tmp & (~BM_SD_OFF),
230 priv->sdmmc_base + SDMMC_BUSMODE);
231 } else {
232 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
233 writeb(reg_tmp | BM_SD_OFF,
234 priv->sdmmc_base + SDMMC_BUSMODE);
235 }
236 }
237}
238
/*
 * Copy the controller's 16-byte response FIFO into priv->cmd->resp[].
 * Bytes are read one at a time starting at SDMMC_RSP + 1; the final
 * (idx1 == 3, idx2 == 3) byte wraps back to SDMMC_RSP itself.
 */
static void wmt_mci_read_response(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	int idx1, idx2;
	u8 tmp_resp;
	u32 response;

	priv = mmc_priv(mmc);

	for (idx1 = 0; idx1 < 4; idx1++) {
		response = 0;
		for (idx2 = 0; idx2 < 4; idx2++) {
			if ((idx1 == 3) && (idx2 == 3))
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP);
			else
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP +
						 (idx1*4) + idx2 + 1);
			response |= (tmp_resp << (idx2 * 8));
		}
		/* FIFO delivers the response MSB first; swap per word */
		priv->cmd->resp[idx1] = cpu_to_be32(response);
	}
}
261
262static void wmt_mci_start_command(struct wmt_mci_priv *priv)
263{
264 u32 reg_tmp;
265
266 reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
267 writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR);
268}
269
/*
 * Program a command into the controller: opcode, argument, response
 * type; reset the response FIFO and clear stale status bits.  The
 * command is not started here - wmt_mci_start_command() does that.
 * Always returns 0.
 */
static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype,
				u32 arg, u8 rsptype)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* write command, arg, resptype registers */
	writeb(command, priv->sdmmc_base + SDMMC_CMD);
	writel(arg, priv->sdmmc_base + SDMMC_ARG);
	writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* ensure clock enabled - VT3465 */
	wmt_set_sd_power(priv, WMT_SD_POWER_ON);

	/* clear status bits (write-1-to-clear) */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS2);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS3);

	/* set command type in the upper nibble of CTLR */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb((reg_tmp & 0x0F) | (cmdtype << 4),
	       priv->sdmmc_base + SDMMC_CTLR);

	return 0;
}
303
/* Ack any pending DMA interrupt and mask further DMA interrupts. */
static void wmt_mci_disable_dma(struct wmt_mci_priv *priv)
{
	writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR);
	writel(0, priv->sdmmc_base + SDDMA_IER);
}
309
/*
 * Finish a data request once its DMA half has completed: unmap the
 * scatterlist, read the response and either complete the request or
 * issue the stop command for multi-block transfers.  Called from
 * interrupt context (both ISRs).
 */
static void wmt_complete_data_request(struct wmt_mci_priv *priv)
{
	struct mmc_request *req;
	req = priv->req;

	req->data->bytes_xfered = req->data->blksz * req->data->blocks;

	/* unmap the DMA pages used for write data */
	if (req->data->flags & MMC_DATA_WRITE)
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_TO_DEVICE);
	else
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_FROM_DEVICE);

	/* Check if the DMA ISR returned a data error */
	if ((req->cmd->error) || (req->data->error))
		mmc_request_done(priv->mmc, req);
	else {
		wmt_mci_read_response(priv->mmc);
		if (!req->data->stop) {
			/* single-block read/write requests end here */
			mmc_request_done(priv->mmc, req);
		} else {
			/*
			 * we change the priv->cmd variable so the response is
			 * stored in the stop struct rather than the original
			 * calling command struct
			 */
			priv->comp_cmd = &priv->cmdcomp;
			init_completion(priv->comp_cmd);
			priv->cmd = req->data->stop;
			/* cmdtype 7 / rsptype 9: controller encoding used
			 * for the stop command (9 is the converted R1b, see
			 * wmt_mci_request()) */
			wmt_mci_send_command(priv->mmc, req->data->stop->opcode,
					     7, req->data->stop->arg, 9);
			wmt_mci_start_command(priv);
		}
	}
}
348
349static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data)
350{
Tony Prisk3a96dff2012-11-18 15:33:06 +1300351 struct wmt_mci_priv *priv;
352
353 int status;
354
355 priv = (struct wmt_mci_priv *)data;
Tony Prisk3a96dff2012-11-18 15:33:06 +1300356
357 status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F;
358
359 if (status != DMA_CCR_EVT_SUCCESS) {
360 dev_err(priv->dev, "DMA Error: Status = %d\n", status);
361 priv->req->data->error = -ETIMEDOUT;
362 complete(priv->comp_dma);
363 return IRQ_HANDLED;
364 }
365
366 priv->req->data->error = 0;
367
368 wmt_mci_disable_dma(priv);
369
370 complete(priv->comp_dma);
371
372 if (priv->comp_cmd) {
373 if (completion_done(priv->comp_cmd)) {
374 /*
375 * if the command (regular) interrupt has already
376 * completed, finish off the request otherwise we wait
377 * for the command interrupt and finish from there.
378 */
379 wmt_complete_data_request(priv);
380 }
381 }
382
383 return IRQ_HANDLED;
384}
385
/*
 * Command/card-detect interrupt handler: handles card insertion and
 * removal, command-response completion and timeouts, and finishes
 * data requests whose DMA half has already completed.
 */
static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;
	u32 status0;
	u32 status1;
	u32 status2;
	u32 reg_tmp;
	int cmd_done;

	priv = (struct wmt_mci_priv *)data;
	cmd_done = 0;
	status0 = readb(priv->sdmmc_base + SDMMC_STS0);
	status1 = readb(priv->sdmmc_base + SDMMC_STS1);
	status2 = readb(priv->sdmmc_base + SDMMC_STS2);

	/* Check for card insertion */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
	if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) {
		mmc_detect_change(priv->mmc, 0);
		/* fail whatever was in flight when the card changed */
		if (priv->cmd)
			priv->cmd->error = -ETIMEDOUT;
		if (priv->comp_cmd)
			complete(priv->comp_cmd);
		if (priv->comp_dma) {
			wmt_mci_disable_dma(priv);
			complete(priv->comp_dma);
		}
		writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0);
		return IRQ_HANDLED;
	}

	if ((!priv->req->data) ||
	    ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
		/* handle non-data & stop_transmission requests */
		if (status1 & STS1_CMDRSP_DONE) {
			priv->cmd->error = 0;
			cmd_done = 1;
		} else if ((status1 & STS1_RSP_TIMEOUT) ||
			   (status1 & STS1_DATA_TIMEOUT)) {
			priv->cmd->error = -ETIMEDOUT;
			cmd_done = 1;
		}

		if (cmd_done) {
			priv->comp_cmd = NULL;

			if (!priv->cmd->error)
				wmt_mci_read_response(priv->mmc);

			priv->cmd = NULL;

			mmc_request_done(priv->mmc, priv->req);
		}
	} else {
		/* handle data requests */
		if (status1 & STS1_CMDRSP_DONE) {
			if (priv->cmd)
				priv->cmd->error = 0;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
		}

		/* a timeout aborts both the command and DMA halves */
		if ((status1 & STS1_RSP_TIMEOUT) ||
		    (status1 & STS1_DATA_TIMEOUT)) {
			if (priv->cmd)
				priv->cmd->error = -ETIMEDOUT;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
			if (priv->comp_dma) {
				wmt_mci_disable_dma(priv);
				complete(priv->comp_dma);
			}
		}

		if (priv->comp_dma) {
			/*
			 * If the dma interrupt has already completed, finish
			 * off the request; otherwise we wait for the DMA
			 * interrupt and finish from there.
			 */
			if (completion_done(priv->comp_dma))
				wmt_complete_data_request(priv);
		}
	}

	/* acknowledge the status bits we handled */
	writeb(status0, priv->sdmmc_base + SDMMC_STS0);
	writeb(status1, priv->sdmmc_base + SDMMC_STS1);
	writeb(status2, priv->sdmmc_base + SDMMC_STS2);

	return IRQ_HANDLED;
}
477
/*
 * Put the controller into a known state: soft reset, response FIFO
 * reset, card detect via the GPI pin, default interrupt masks, DMA
 * timeout and a 400kHz identification clock.
 */
static void wmt_reset_hardware(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* reset controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* enable GPI pin to detect card */
	writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN);

	/* clear interrupt status */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* setup interrupts */
	writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base +
	       SDMMC_INTMASK0);
	writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN |
	       INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1);

	/* set the DMA timeout */
	writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT);

	/* auto clock freezing enable */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2);
	writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2);

	/* set a default clock speed of 400Khz */
	clk_set_rate(priv->clk_sdmmc, 400000);
}
516
517static int wmt_dma_init(struct mmc_host *mmc)
518{
519 struct wmt_mci_priv *priv;
520
521 priv = mmc_priv(mmc);
522
523 writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR);
524 writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR);
525 if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0)
526 return 0;
527 else
528 return 1;
529}
530
/*
 * Fill in one 16-byte DMA descriptor.
 * @req_count:   transfer length in bytes for this descriptor
 * @buffer_addr: bus address of the data buffer
 * @branch_addr: bus address of the next descriptor in the chain
 * @end:         non-zero marks this descriptor as end-of-chain
 *
 * 0x40000000 is the descriptor format flag (== DMA_RBR_FORMAT) and
 * 0x80000000 the end-of-chain flag (== DMA_RBR_END).
 */
static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc,
		u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
{
	desc->flags = 0x40000000 | req_count;
	if (end)
		desc->flags |= 0x80000000;
	desc->data_buffer_addr = buffer_addr;
	desc->branch_addr = branch_addr;
}
540
/*
 * Point the DMA engine at a descriptor chain and program the transfer
 * direction.  The transfer itself is started by wmt_dma_start().
 */
static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* Enable DMA Interrupts */
	writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER);

	/* Write DMA Descriptor Pointer Register */
	writel(descaddr, priv->sdmmc_base + SDDMA_DESPR);

	writel(0x00, priv->sdmmc_base + SDDMA_CCR);

	if (dir == PDMA_WRITE) {
		/*
		 * NOTE(review): DMA_CCR_IF_TO_PERIPHERAL is 0x0, so this
		 * '&' always writes 0.  Harmless because CCR was just
		 * zeroed above, but '|' was probably intended - confirm
		 * before changing.
		 */
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base +
		       SDDMA_CCR);
	} else {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base +
		       SDDMA_CCR);
	}
}
566
567static void wmt_dma_start(struct wmt_mci_priv *priv)
568{
569 u32 reg_tmp;
570
571 reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
572 writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR);
573}
574
/*
 * mmc_host_ops .request: issue the command and, for data transfers,
 * build a chain of one DMA descriptor per block and start the PDMA
 * engine.  Completion is interrupt-driven via the regular and DMA
 * ISRs.
 */
static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct wmt_mci_priv *priv;
	struct wmt_dma_descriptor *desc;
	u8 command;
	u8 cmdtype;
	u32 arg;
	u8 rsptype;
	u32 reg_tmp;

	struct scatterlist *sg;
	int i;
	int sg_cnt;
	int offset;
	u32 dma_address;
	int desc_cnt;

	priv = mmc_priv(mmc);
	priv->req = req;

	/*
	 * Use the cmd variable to pass a pointer to the resp[] structure
	 * This is required on multi-block requests to pass the pointer to the
	 * stop command
	 */
	priv->cmd = req->cmd;

	command = req->cmd->opcode;
	arg = req->cmd->arg;
	rsptype = mmc_resp_type(req->cmd);
	cmdtype = 0;

	/* rsptype=7 only valid for SPI commands - should be =2 for SD */
	if (rsptype == 7)
		rsptype = 2;
	/* rsptype=21 is R1B, convert for controller */
	if (rsptype == 21)
		rsptype = 9;

	if (!req->data) {
		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
		wmt_mci_start_command(priv);
		/* completion is now handled in the regular_isr() */
	}
	if (req->data) {
		priv->comp_cmd = &priv->cmdcomp;
		init_completion(priv->comp_cmd);

		wmt_dma_init(mmc);

		/* set controller data length */
		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		/* set controller block count */
		writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);

		desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer;

		/* cmdtype encodes read/write and single/multi block */
		if (req->data->flags & MMC_DATA_WRITE) {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_TO_DEVICE);
			cmdtype = 1;
			if (req->data->blocks > 1)
				cmdtype = 3;
		} else {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_FROM_DEVICE);
			cmdtype = 2;
			if (req->data->blocks > 1)
				cmdtype = 4;
		}

		/* descriptors are 16 bytes; first branch target is +16 */
		dma_address = priv->dma_desc_device_addr + 16;
		desc_cnt = 0;

		/* one descriptor per block, split over scatterlist entries */
		for_each_sg(req->data->sg, sg, sg_cnt, i) {
			offset = 0;
			while (offset < sg_dma_len(sg)) {
				wmt_dma_init_descriptor(desc, req->data->blksz,
						sg_dma_address(sg)+offset,
						dma_address, 0);
				desc++;
				desc_cnt++;
				offset += req->data->blksz;
				dma_address += 16;
				if (desc_cnt == req->data->blocks)
					break;
			}
		}
		/* mark the last descriptor written as end-of-chain */
		desc--;
		desc->flags |= 0x80000000;

		if (req->data->flags & MMC_DATA_WRITE)
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_WRITE);
		else
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_READ);

		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);

		priv->comp_dma = &priv->datacomp;
		init_completion(priv->comp_dma);

		wmt_dma_start(priv);
		wmt_mci_start_command(priv);
	}
}
685
/*
 * mmc_host_ops .set_ios: apply power state, bus clock and bus width.
 * 8-bit mode is selected via bit 0x04 in SDMMC_EXTCTRL, 4-/1-bit via
 * BM_FOURBIT_MODE in SDMMC_BUSMODE.
 */
static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	if (ios->power_mode == MMC_POWER_UP) {
		/* bring the controller back to a known state first */
		wmt_reset_hardware(mmc);

		wmt_set_sd_power(priv, WMT_SD_POWER_ON);
	}
	if (ios->power_mode == MMC_POWER_OFF)
		wmt_set_sd_power(priv, WMT_SD_POWER_OFF);

	if (ios->clock != 0)
		clk_set_rate(priv->clk_sdmmc, ios->clock);

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL);
		writeb(reg_tmp | 0x04, priv->sdmmc_base + SDMMC_EXTCTRL);
		break;
	case MMC_BUS_WIDTH_4:
		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_FOURBIT_MODE, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		/* clear the 8-bit enable in EXTCTRL */
		reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL);
		writeb(reg_tmp & 0xFB, priv->sdmmc_base + SDMMC_EXTCTRL);
		break;
	case MMC_BUS_WIDTH_1:
		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp & BM_ONEBIT_MASK, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL);
		writeb(reg_tmp & 0xFB, priv->sdmmc_base + SDMMC_EXTCTRL);
		break;
	}
}
727
728static int wmt_mci_get_ro(struct mmc_host *mmc)
729{
730 struct wmt_mci_priv *priv = mmc_priv(mmc);
731
732 return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT);
733}
734
735static int wmt_mci_get_cd(struct mmc_host *mmc)
736{
737 struct wmt_mci_priv *priv = mmc_priv(mmc);
738 u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3;
739
740 return !(cd ^ priv->cd_inverted);
741}
742
/* Host operations: request submission plus register-based RO/CD. */
static struct mmc_host_ops wmt_mci_ops = {
	.request = wmt_mci_request,
	.set_ios = wmt_mci_set_ios,
	.get_ro = wmt_mci_get_ro,
	.get_cd = wmt_mci_get_cd,
};
749
/* Controller capabilities for the WM8505/WM8650 (OF match data) */
static struct wmt_mci_caps wm8505_caps = {
	.f_min = 390425,
	.f_max = 50000000,
	.ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34,
	.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |
		MMC_CAP_SD_HIGHSPEED,
	.max_seg_size = 65024,
	.max_segs = 128,
	.max_blk_size = 2048,
};
761
/* OF match table; .data points at the per-SoC capability struct. */
static struct of_device_id wmt_mci_dt_ids[] = {
	{ .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps },
	{ /* Sentinel */ },
};
766
Greg Kroah-Hartman4e608e42012-12-21 15:05:47 -0800767static int wmt_mci_probe(struct platform_device *pdev)
Tony Prisk3a96dff2012-11-18 15:33:06 +1300768{
769 struct mmc_host *mmc;
770 struct wmt_mci_priv *priv;
771 struct device_node *np = pdev->dev.of_node;
772 const struct of_device_id *of_id =
773 of_match_device(wmt_mci_dt_ids, &pdev->dev);
774 const struct wmt_mci_caps *wmt_caps = of_id->data;
775 int ret;
776 int regular_irq, dma_irq;
777
778 if (!of_id || !of_id->data) {
779 dev_err(&pdev->dev, "Controller capabilities data missing\n");
780 return -EFAULT;
781 }
782
783 if (!np) {
784 dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
785 return -EFAULT;
786 }
787
788 regular_irq = irq_of_parse_and_map(np, 0);
789 dma_irq = irq_of_parse_and_map(np, 1);
790
791 if (!regular_irq || !dma_irq) {
792 dev_err(&pdev->dev, "Getting IRQs failed!\n");
793 ret = -ENXIO;
794 goto fail1;
795 }
796
797 mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
798 if (!mmc) {
799 dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
800 ret = -ENOMEM;
801 goto fail1;
802 }
803
804 mmc->ops = &wmt_mci_ops;
805 mmc->f_min = wmt_caps->f_min;
806 mmc->f_max = wmt_caps->f_max;
807 mmc->ocr_avail = wmt_caps->ocr_avail;
808 mmc->caps = wmt_caps->caps;
809
810 mmc->max_seg_size = wmt_caps->max_seg_size;
811 mmc->max_segs = wmt_caps->max_segs;
812 mmc->max_blk_size = wmt_caps->max_blk_size;
813
814 mmc->max_req_size = (16*512*mmc->max_segs);
815 mmc->max_blk_count = mmc->max_req_size / 512;
816
817 priv = mmc_priv(mmc);
818 priv->mmc = mmc;
819 priv->dev = &pdev->dev;
820
821 priv->power_inverted = 0;
822 priv->cd_inverted = 0;
823
824 if (of_get_property(np, "sdon-inverted", NULL))
825 priv->power_inverted = 1;
826 if (of_get_property(np, "cd-inverted", NULL))
827 priv->cd_inverted = 1;
828
829 priv->sdmmc_base = of_iomap(np, 0);
830 if (!priv->sdmmc_base) {
831 dev_err(&pdev->dev, "Failed to map IO space\n");
832 ret = -ENOMEM;
833 goto fail2;
834 }
835
836 priv->irq_regular = regular_irq;
837 priv->irq_dma = dma_irq;
838
839 ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv);
840 if (ret) {
841 dev_err(&pdev->dev, "Register regular IRQ fail\n");
842 goto fail3;
843 }
844
845 ret = request_irq(dma_irq, wmt_mci_dma_isr, 32, "sdmmc", priv);
846 if (ret) {
847 dev_err(&pdev->dev, "Register DMA IRQ fail\n");
848 goto fail4;
849 }
850
851 /* alloc some DMA buffers for descriptors/transfers */
852 priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev,
853 mmc->max_blk_count * 16,
854 &priv->dma_desc_device_addr,
855 208);
856 if (!priv->dma_desc_buffer) {
857 dev_err(&pdev->dev, "DMA alloc fail\n");
858 ret = -EPERM;
859 goto fail5;
860 }
861
862 platform_set_drvdata(pdev, mmc);
863
864 priv->clk_sdmmc = of_clk_get(np, 0);
865 if (IS_ERR(priv->clk_sdmmc)) {
866 dev_err(&pdev->dev, "Error getting clock\n");
867 ret = PTR_ERR(priv->clk_sdmmc);
868 goto fail5;
869 }
870
871 clk_prepare_enable(priv->clk_sdmmc);
872
873 /* configure the controller to a known 'ready' state */
874 wmt_reset_hardware(mmc);
875
876 mmc_add_host(mmc);
877
878 dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");
879
880 return 0;
881fail5:
882 free_irq(dma_irq, priv);
883fail4:
884 free_irq(regular_irq, priv);
885fail3:
886 iounmap(priv->sdmmc_base);
887fail2:
888 mmc_free_host(mmc);
889fail1:
890 return ret;
891}
892
Greg Kroah-Hartman4e608e42012-12-21 15:05:47 -0800893static int wmt_mci_remove(struct platform_device *pdev)
Tony Prisk3a96dff2012-11-18 15:33:06 +1300894{
895 struct mmc_host *mmc;
896 struct wmt_mci_priv *priv;
897 struct resource *res;
898 u32 reg_tmp;
899
900 mmc = platform_get_drvdata(pdev);
901 priv = mmc_priv(mmc);
902
903 /* reset SD controller */
904 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
905 writel(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
906 reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
907 writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN);
908 writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
909 writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
910
911 /* release the dma buffers */
912 dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16,
913 priv->dma_desc_buffer, priv->dma_desc_device_addr);
914
915 mmc_remove_host(mmc);
916
917 free_irq(priv->irq_regular, priv);
918 free_irq(priv->irq_dma, priv);
919
920 iounmap(priv->sdmmc_base);
921
922 clk_disable_unprepare(priv->clk_sdmmc);
923 clk_put(priv->clk_sdmmc);
924
925 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Alexandru Gheorghiu22119902013-03-12 01:04:54 +0200926 release_mem_region(res->start, resource_size(res));
Tony Prisk3a96dff2012-11-18 15:33:06 +1300927
928 mmc_free_host(mmc);
929
930 platform_set_drvdata(pdev, NULL);
931
932 dev_info(&pdev->dev, "WMT MCI device removed\n");
933
934 return 0;
935}
936
937#ifdef CONFIG_PM
/*
 * System suspend: after mmc_suspend_host() succeeds, soft-reset the
 * controller, disable the BLKLEN interrupt/card-detect enables
 * (masking with 0x5FFF clears BLKL_INT_ENABLE and BLKL_GPI_CD) and
 * gate the clock.
 */
static int wmt_mci_suspend(struct device *dev)
{
	u32 reg_tmp;
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct wmt_mci_priv *priv;
	int ret;

	if (!mmc)
		return 0;

	priv = mmc_priv(mmc);
	ret = mmc_suspend_host(mmc);

	if (!ret) {
		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);

		writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
		writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

		clk_disable(priv->clk_sdmmc);
	}
	return ret;
}
967
/*
 * System resume: ungate the clock, soft-reset the controller and
 * re-enable GPI card detection and its interrupt before resuming the
 * host.
 */
static int wmt_mci_resume(struct device *dev)
{
	u32 reg_tmp;
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct wmt_mci_priv *priv;
	int ret = 0;

	if (mmc) {
		priv = mmc_priv(mmc);
		clk_enable(priv->clk_sdmmc);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
		writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
		       SDMMC_INTMASK0);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
997
/* PM callbacks; wired into the driver only when CONFIG_PM is set. */
static const struct dev_pm_ops wmt_mci_pm = {
	.suspend        = wmt_mci_suspend,
	.resume         = wmt_mci_resume,
};
1002
1003#define wmt_mci_pm_ops (&wmt_mci_pm)
1004
1005#else /* !CONFIG_PM */
1006
1007#define wmt_mci_pm_ops NULL
1008
1009#endif
1010
1011static struct platform_driver wmt_mci_driver = {
1012 .probe = wmt_mci_probe,
Tony Prisk893613b2013-01-13 19:19:20 +13001013 .remove = wmt_mci_remove,
Tony Prisk3a96dff2012-11-18 15:33:06 +13001014 .driver = {
1015 .name = DRIVER_NAME,
1016 .owner = THIS_MODULE,
1017 .pm = wmt_mci_pm_ops,
1018 .of_match_table = wmt_mci_dt_ids,
1019 },
1020};
1021
1022module_platform_driver(wmt_mci_driver);
1023
1024MODULE_DESCRIPTION("Wondermedia MMC/SD Driver");
1025MODULE_AUTHOR("Tony Prisk");
1026MODULE_LICENSE("GPL v2");
1027MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids);