/*
 * linux/drivers/mmc/at91_mci.c - ATMEL AT91RM9200 MCI Driver
 *
 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
 *
 * Copyright (C) 2006 Malcolm Noyes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
   This is the AT91RM9200 MCI driver. It has been tested with both MMC cards
   and SD-cards. Boards that support write protect are now supported.
   The CCAT91SBC001 board does not support SD cards.

   The three entry points are at91_mci_request, at91_mci_set_ios
   and at91_mci_get_ro.

   SET IOS
   This configures the device to put it into the correct mode and clock speed
   required.

   MCI REQUEST
   MCI request processes the commands sent in the mmc_request structure. This
   can consist of a processing command and a stop command in the case of
   multiple block transfers.

   There are three main types of request: commands, reads and writes.

   Commands are straightforward. The command is submitted to the controller and
   the request function returns. When the controller generates an interrupt to
   indicate the command is finished, the response to the command is read and
   mmc_request_done is called to end the request.

   Reads and writes work in a similar manner to normal commands but involve the
   PDC (DMA) controller to manage the transfers.

   A read is done from the controller directly into the scatterlist passed in
   from the request. Due to a bug in the controller, when a read is completed,
   all the words are byte swapped in the scatterlist buffers.

   The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY

   A write is slightly different in that the bytes to write are read from the
   scatterlist into a DMA memory buffer (in case the source buffer should be
   read only). The entire write is then done from this single DMA memory buffer.

   The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY

   GET RO
   Gets the status of the write protect pin, if available.
*/

#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>

#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mach/mmc.h>
#include <asm/arch/board.h>
#include <asm/arch/gpio.h>
#include <asm/arch/at91rm9200_mci.h>
#include <asm/arch/at91rm9200_pdc.h>

#define DRIVER_NAME "at91_mci"

#undef SUPPORT_4WIRE

#ifdef CONFIG_MMC_DEBUG
#define DBG(fmt...)	\
	printk(fmt)
#else
#define DBG(fmt...)	do { } while (0)
#endif

static struct clk *mci_clk;

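/* Per-request progress flags: set once the command (and, if present, the stop
 * command) for the current mmc_request has been issued; see at91mci_process_next(). */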
#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)


/*
 * Read from an MCI register.
 */
static inline unsigned long at91_mci_read(unsigned int reg)
{
	void __iomem *mci_base = (void __iomem *)AT91_VA_BASE_MCI;

	return __raw_readl(mci_base + reg);
}

/*
 * Write to an MCI register.
 */
static inline void at91_mci_write(unsigned int reg, unsigned long value)
{
	void __iomem *mci_base = (void __iomem *)AT91_VA_BASE_MCI;

	__raw_writel(value, mci_base + reg);
}

/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;
	struct mmc_request *request;

	struct at91_mmc_data *board;
	int present;

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int *buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};

/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = host->total_length;
	len = data->sg_len;

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		int index;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;
		amount /= 4;

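		/* Copy this entry into the DMA buffer one word at a time,
		 * byte-swapping each word to match the controller's data
		 * ordering (see the byte-swap note in the header comment). */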
		for (index = 0; index < amount; index++)
			*dmabuf++ = swab32(sgbuffer[index]);

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}

/*
 * Prepare a dma read
 */
static void at91mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	DBG("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		DBG("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		DBG("no data\n");
		return;
	}

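	/* The PDC has both a "current" and a "next" buffer pointer, so prime
	 * up to two scatterlist entries here: pass 0 fills the current
	 * registers (RPR/RCR), pass 1 the next registers (RNPR/RNCR). */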
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			DBG("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(AT91_PDC_RCR) != 0) {
				DBG("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(AT91_PDC_RNCR) != 0) {
				DBG("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		DBG("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		DBG("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		DBG("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(AT91_PDC_RPR, sg->dma_address);
			at91_mci_write(AT91_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(AT91_PDC_RNPR, sg->dma_address);
			at91_mci_write(AT91_PDC_RNCR, sg->length / 4);
		}
	}

	DBG("pre dma read done\n");
}

/*
 * Handle after a dma read
 */
static void at91mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	DBG("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		DBG("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		DBG("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		unsigned int *buffer;
		int index;
		int len;

		struct scatterlist *sg;

		DBG("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		DBG("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		/* Swap the contents of the buffer */
		buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		DBG("buffer = %p, length = %d\n", buffer, sg->length);

		data->bytes_xfered += sg->length;

		len = sg->length / 4;

		for (index = 0; index < len; index++) {
			buffer[index] = swab32(buffer[index]);
		}
		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		flush_dcache_page(sg->page);
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91mci_pre_dma_read(host);
	else {
		at91_mci_write(AT91_MCI_IER, AT91_MCI_RXBUFF);
		at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
	}

	DBG("post dma read done\n");
}

/*
 * Handle transmitted data
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	DBG("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(AT91_MCI_IDR, AT91_MCI_TXBUFE);
	at91_mci_write(AT91_MCI_IER, AT91_MCI_NOTBUSY);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

	data->bytes_xfered = host->total_length;
}

/*
 * Enable the controller
 */
static void at91_mci_enable(void)
{
	at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(AT91_MCI_IDR, 0xFFFFFFFF);
	at91_mci_write(AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	at91_mci_write(AT91_MCI_MR, 0x834A);
	at91_mci_write(AT91_MCI_SDCR, 0x0);
}

/*
 * Disable the controller
 */
static void at91_mci_disable(void)
{
	at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}

/*
 * Send a command
 * return the interrupts to enable
 */
static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Not sure if this is needed */
#if 0
	if ((at91_mci_read(AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		DBG("Clearing timeout\n");
		at91_mci_write(AT91_MCI_ARGR, 0);
		at91_mci_write(AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			DBG("Clearing: SR = %08X\n", at91_mci_read(AT91_MCI_SR));
		}
	}
#endif
	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		block_length = 1 << data->blksz_bits;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->flags & MMC_DATA_MULTI)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	DBG("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08lX)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(AT91_MCI_MR));

	if (!data) {
		at91_mci_write(AT91_PDC_PTCR, AT91_PDC_TXTDIS | AT91_PDC_RXTDIS);
		at91_mci_write(AT91_PDC_RPR, 0);
		at91_mci_write(AT91_PDC_RCR, 0);
		at91_mci_write(AT91_PDC_RNPR, 0);
		at91_mci_write(AT91_PDC_RNCR, 0);
		at91_mci_write(AT91_PDC_TPR, 0);
		at91_mci_write(AT91_PDC_TCR, 0);
		at91_mci_write(AT91_PDC_TNPR, 0);
		at91_mci_write(AT91_PDC_TNCR, 0);

		at91_mci_write(AT91_MCI_ARGR, cmd->arg);
		at91_mci_write(AT91_MCI_CMDR, cmdr);
		return AT91_MCI_CMDRDY;
	}

	mr = at91_mci_read(AT91_MCI_MR) & 0x7fff;	/* zero block length and PDC mode */
	at91_mci_write(AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);

	/*
	 * Disable the PDC controller
	 */
	at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);

	if (cmdr & AT91_MCI_TRCMD_START) {
		data->bytes_xfered = 0;
		host->transfer_index = 0;
		host->in_use_index = 0;
		if (cmdr & AT91_MCI_TRDIR) {
			/*
			 * Handle a read
			 */
			host->buffer = NULL;
			host->total_length = 0;

			at91mci_pre_dma_read(host);
			ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
		}
		else {
			/*
			 * Handle a write
			 */
			host->total_length = block_length * blocks;
			host->buffer = dma_alloc_coherent(NULL,
					host->total_length,
					&host->physical_address, GFP_KERNEL);

			at91mci_sg_to_dma(host, data);

			DBG("Transmitting %d bytes\n", host->total_length);

			at91_mci_write(AT91_PDC_TPR, host->physical_address);
			at91_mci_write(AT91_PDC_TCR, host->total_length / 4);
			ier = AT91_MCI_TXBUFE;
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(AT91_PDC_PTCR, AT91_PDC_RXTEN);
		else
			at91_mci_write(AT91_PDC_PTCR, AT91_PDC_TXTEN);
	}
	return ier;
}

/*
 * Wait for a command to complete
 */
static void at91mci_process_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int ier;

	ier = at91_mci_send_command(host, cmd);

	DBG("setting ier to %08X\n", ier);

	/* Stop on errors or the required value */
	at91_mci_write(AT91_MCI_IER, 0xffff0000 | ier);
}

/*
 * Process the next step in the request
 */
static void at91mci_process_next(struct at91mci_host *host)
{
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91mci_process_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91mci_process_command(host, host->request->stop);
	}
	else
		mmc_request_done(host->mmc, host->request);
}

/*
 * Handle a command that has been completed
 */
static void at91mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	at91_mci_write(AT91_MCI_IDR, 0xffffffff);

	cmd->resp[0] = at91_mci_read(AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(AT91_MCI_SR);

	DBG("Status = %08X [%08X %08X %08X %08X]\n",
		status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
			AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
			AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
		if ((status & AT91_MCI_RCRCE) &&
			((cmd->opcode == MMC_SEND_OP_COND) || (cmd->opcode == SD_APP_OP_COND))) {
			cmd->error = MMC_ERR_NONE;
		}
		else {
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = MMC_ERR_TIMEOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = MMC_ERR_BADCRC;
			else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
				cmd->error = MMC_ERR_FIFO;
			else
				cmd->error = MMC_ERR_FAILED;

			DBG("Error detected and set to %d (cmd = %d, retries = %d)\n",
				cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = MMC_ERR_NONE;

	at91mci_process_next(host);
}

/*
 * Handle an MMC request
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;

	at91mci_process_next(host);
}

/*
 * Set the IOS
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(mci_clk);

	DBG("Clock %uHz, busmode %u, powermode %u, Vdd %u\n",
		ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);

	if (host)
		host->bus_mode = ios->bus_mode;
	else
		printk("MMC: No host for bus_mode\n");

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(AT91_MCI_CR, AT91_MCI_MCIEN);

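		/* The MCI clock runs at MCK / (2 * (CLKDIV + 1)); pick a divider
		 * that keeps the card clock at or below the requested rate. */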
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		DBG("clkdiv = %d. mcck = %ld\n", clkdiv,
			at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		DBG("MMC: Setting controller bus width to 4\n");
		at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		DBG("MMC: Setting controller bus width to 1\n");
		at91_mci_write(AT91_MCI_SDCR, at91_mci_read(AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(AT91_MCI_MR, (at91_mci_read(AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host && host->board->vcc_pin) {
		switch (ios->power_mode) {
			case MMC_POWER_OFF:
				at91_set_gpio_output(host->board->vcc_pin, 0);
				break;
			case MMC_POWER_UP:
			case MMC_POWER_ON:
				at91_set_gpio_output(host->board->vcc_pin, 1);
				break;
		}
	}
}

/*
 * Handle an interrupt
 */
static irqreturn_t at91_mci_irq(int irq, void *devid, struct pt_regs *regs)
{
	struct at91mci_host *host = devid;
	int completed = 0;

	unsigned int int_status;

	if (host == NULL)
		return IRQ_HANDLED;

	int_status = at91_mci_read(AT91_MCI_SR);
	DBG("MCI irq: status = %08X, %08lX, %08lX\n", int_status, at91_mci_read(AT91_MCI_IMR),
		int_status & at91_mci_read(AT91_MCI_IMR));

	if ((int_status & at91_mci_read(AT91_MCI_IMR)) & 0xffff0000)
		completed = 1;

	int_status &= at91_mci_read(AT91_MCI_IMR);

	if (int_status & AT91_MCI_UNRE)
		DBG("MMC: Underrun error\n");
	if (int_status & AT91_MCI_OVRE)
		DBG("MMC: Overrun error\n");
	if (int_status & AT91_MCI_DTOE)
		DBG("MMC: Data timeout\n");
	if (int_status & AT91_MCI_DCRCE)
		DBG("MMC: CRC error in data\n");
	if (int_status & AT91_MCI_RTOE)
		DBG("MMC: Response timeout\n");
	if (int_status & AT91_MCI_RENDE)
		DBG("MMC: Response end bit error\n");
	if (int_status & AT91_MCI_RCRCE)
		DBG("MMC: Response CRC error\n");
	if (int_status & AT91_MCI_RDIRE)
		DBG("MMC: Response direction error\n");
	if (int_status & AT91_MCI_RINDE)
		DBG("MMC: Response index error\n");

	/* Only continue processing if no errors */
	if (!completed) {
		if (int_status & AT91_MCI_TXBUFE) {
			DBG("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			DBG("RX buffer full\n");
			at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_ENDTX) {
			DBG("Transmit has ended\n");
		}

		if (int_status & AT91_MCI_ENDRX) {
			DBG("Receive has ended\n");
			at91mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_NOTBUSY) {
			DBG("Card is ready\n");
			at91_mci_write(AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_DTIP) {
			DBG("Data transfer in progress\n");
		}

		if (int_status & AT91_MCI_BLKE) {
			DBG("Block transfer has ended\n");
		}

		if (int_status & AT91_MCI_TXRDY) {
			DBG("Ready to transmit\n");
		}

		if (int_status & AT91_MCI_RXRDY) {
			DBG("Ready to receive\n");
		}

		if (int_status & AT91_MCI_CMDRDY) {
			DBG("Command ready\n");
			completed = 1;
		}
	}
	at91_mci_write(AT91_MCI_IDR, int_status);

	if (completed) {
		DBG("Completed command\n");
		at91_mci_write(AT91_MCI_IDR, 0xffffffff);
		at91mci_completed_command(host);
	}

	return IRQ_HANDLED;
}

static irqreturn_t at91_mmc_det_irq(int irq, void *_host, struct pt_regs *regs)
{
	struct at91mci_host *host = _host;
	int present = !at91_get_gpio_value(irq);

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		DBG("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			DBG("****** Resetting SD-card bus width ******\n");
			at91_mci_write(AT91_MCI_SDCR, 0);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}

/*
 * Read the current write-protect (read-only) state of the card, if the
 * board provides a write-protect pin.
 */
int at91_mci_get_ro(struct mmc_host *mmc)
{
	int read_only = 0;
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin) {
		read_only = at91_get_gpio_value(host->board->wp_pin);
		printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
			(read_only ? "read-only" : "read-write"));
	}
	else {
		printk(KERN_WARNING "%s: host does not support reading read-only "
			"switch. Assuming write-enabled.\n", mmc_hostname(mmc));
	}
	return read_only;
}

static struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};

/*
 * Probe for the device
 */
static int at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	int ret;

	DBG("Probe MCI devices\n");
	at91_mci_disable();
	at91_mci_enable();

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		DBG("Failed to allocate mmc host\n");
		return -ENOMEM;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
#ifdef SUPPORT_4WIRE
		mmc->caps |= MMC_CAP_4_BIT_DATA;
#else
		printk("MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
#endif
	}

	/*
	 * Get Clock
	 */
	mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (!mci_clk) {
		printk(KERN_ERR "AT91 MMC: no clock defined.\n");
		return -ENODEV;
	}
	clk_enable(mci_clk);		/* Enable the peripheral clock */

	/*
	 * Allocate the MCI interrupt
	 */
	ret = request_irq(AT91_ID_MCI, at91_mci_irq, SA_SHIRQ, DRIVER_NAME, host);
	if (ret) {
		DBG("Failed to request MCI interrupt\n");
		return ret;
	}

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin)
		host->present = !at91_get_gpio_value(host->board->det_pin);
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
			SA_SAMPLE_RANDOM, DRIVER_NAME, host);
		if (ret)
			DBG("couldn't allocate MMC detect irq\n");
	}

	DBG(KERN_INFO "Added MCI driver\n");

	return 0;
}

/*
 * Remove a device
 */
static int at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->present != -1) {
		free_irq(host->board->det_pin, host);
		cancel_delayed_work(&host->mmc->detect);
	}

	mmc_remove_host(mmc);
	at91_mci_disable();
	free_irq(AT91_ID_MCI, host);
	mmc_free_host(mmc);

	clk_disable(mci_clk);		/* Disable the peripheral clock */
	clk_put(mci_clk);

	platform_set_drvdata(pdev, NULL);

	DBG("Removed\n");

	return 0;
}

#ifdef CONFIG_PM
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif

static struct platform_driver at91_mci_driver = {
	.probe		= at91_mci_probe,
	.remove		= at91_mci_remove,
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init at91_mci_init(void)
{
	return platform_driver_register(&at91_mci_driver);
}

static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");