/*
 * linux/drivers/mmc/tmio_mmc.c
 *
 * Copyright (C) 2004 Ian Molton
 * Copyright (C) 2007 Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Driver for the MMC / SD / SDIO cell found in:
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet).
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   SDIO support
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 *
 */
28#include <linux/module.h>
29#include <linux/irq.h>
30#include <linux/device.h>
31#include <linux/delay.h>
32#include <linux/mmc/host.h>
33#include <linux/mfd/core.h>
34#include <linux/mfd/tmio.h>
35
36#include "tmio_mmc.h"
37
/*
 * Program the SD card clock divider for the requested rate.
 *
 * Starting from the host's minimum frequency (divider bit 0x100), the
 * candidate clock is doubled and the divider bit shifted down until one
 * more doubling would exceed @new_clock, i.e. the largest reachable rate
 * not above the request is chosen.  @new_clock == 0 writes a cleared
 * divider register, leaving the clock disabled.
 */
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
	void __iomem *cnf = host->cnf;
	void __iomem *ctl = host->ctl;
	u32 clk = 0, clock, f_min = host->mmc->f_min;

	if (new_clock) {
		for (clock = f_min, clk = 0x100; new_clock >= (clock<<1); ) {
			clock <<= 1;
			clk >>= 1;
		}
		/* Divider walked past its lowest setting: the request is at
		 * or above the undivided clock, select that instead. */
		if (clk & 0x1)
			clk = 0x20000;

		clk >>= 2;
		/* NOTE(review): CNF_SD_CLK_MODE appears to select the
		 * undivided clock mode when the top bit survives the shift -
		 * confirm against the TMIO datasheet. */
		tmio_iowrite8((clk & 0x8000) ? 0 : 1, cnf + CNF_SD_CLK_MODE);
		clk |= 0x100;	/* clock enable, cf. tmio_mmc_clk_start() */
	}

	tmio_iowrite16(clk, ctl + CTL_SD_CARD_CLK_CTL);
}
59
60static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
61{
62 void __iomem *ctl = host->ctl;
63
64 tmio_iowrite16(0x0000, ctl + CTL_CLK_AND_WAIT_CTL);
65 msleep(10);
66 tmio_iowrite16(tmio_ioread16(ctl + CTL_SD_CARD_CLK_CTL) & ~0x0100,
67 ctl + CTL_SD_CARD_CLK_CTL);
68 msleep(10);
69}
70
71static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
72{
73 void __iomem *ctl = host->ctl;
74
75 tmio_iowrite16(tmio_ioread16(ctl + CTL_SD_CARD_CLK_CTL) | 0x0100,
76 ctl + CTL_SD_CARD_CLK_CTL);
77 msleep(10);
78 tmio_iowrite16(0x0100, ctl + CTL_CLK_AND_WAIT_CTL);
79 msleep(10);
80}
81
82static void reset(struct tmio_mmc_host *host)
83{
84 void __iomem *ctl = host->ctl;
85
86 /* FIXME - should we set stop clock reg here */
87 tmio_iowrite16(0x0000, ctl + CTL_RESET_SD);
88 tmio_iowrite16(0x0000, ctl + CTL_RESET_SDIO);
89 msleep(10);
90 tmio_iowrite16(0x0001, ctl + CTL_RESET_SD);
91 tmio_iowrite16(0x0001, ctl + CTL_RESET_SDIO);
92 msleep(10);
93}
94
95static void
96tmio_mmc_finish_request(struct tmio_mmc_host *host)
97{
98 struct mmc_request *mrq = host->mrq;
99
100 host->mrq = NULL;
101 host->cmd = NULL;
102 host->data = NULL;
103
104 mmc_request_done(host->mmc, mrq);
105}
106
/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD 0x0040		/* application (ACMD) command - currently unused,
				 * see the FIXME in tmio_mmc_start_command() */
#define RESP_NONE 0x0300	/* no response expected */
#define RESP_R1 0x0400		/* short response (also used for R6) */
#define RESP_R1B 0x0500		/* short response with busy */
#define RESP_R2 0x0600		/* long (136-bit) response */
#define RESP_R3 0x0700		/* short response, no CRC (OCR) */
#define DATA_PRESENT 0x0800	/* command has a data phase */
#define TRANSFER_READ 0x1000	/* data phase is card-to-host */
#define TRANSFER_MULTI 0x2000	/* multi-block transfer */
#define SECURITY_CMD 0x4000	/* security command - unused here */
119
/*
 * Issue a command to the controller: translate the MMC core's response
 * type into the controller's command flag bits, program the argument
 * register, then write the command register (which starts execution).
 *
 * Returns 0 on success, -EINVAL for a response type the hardware cannot
 * produce.  Completion is signalled via the command IRQ enabled below.
 */
static int
tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
	void __iomem *ctl = host->ctl;
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	/* Command 12 is handled by hardware */
	if (cmd->opcode == 12 && !cmd->arg) {
		tmio_iowrite16(0x001, ctl + CTL_STOP_INTERNAL_ACTION);
		return 0;
	}

	/* Map the response type onto command bits; R1 and R6 share an
	 * encoding (see the RESP_* defines above). */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be ok comented out but the spec suggest this bit should
 * be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			/* Presumably arms the controller's automatic CMD12
			 * for multi-block transfers - cf. the stop handling
			 * in tmio_mmc_data_irq(). */
			tmio_iowrite16(0x100, ctl + CTL_STOP_INTERNAL_ACTION);
			c |= TRANSFER_MULTI;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	enable_mmc_irqs(ctl, TMIO_MASK_CMD);

	/* Fire off the command */
	tmio_iowrite32(cmd->arg, ctl + CTL_ARG_REG);
	tmio_iowrite16(c, ctl + CTL_SD_CMD);

	return 0;
}
169
/* This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesnt hose the controller.
 */
/*
 * PIO data interrupt: move at most one block between the 16-bit data
 * port and the current scatter-gather segment, advancing to the next
 * segment once the current one is exhausted.
 */
static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	void __iomem *ctl = host->ctl;
	struct mmc_data *data = host->data;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	/* Atomically map the current sg page; unmapped again below before
	 * we leave the handler. */
	buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
	      host->sg_off);

	/* Transfer what remains of this segment, capped to one block. */
	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
	    count, host->sg_off, data->flags);

	/* Transfer the data; the port is 16 bits wide, hence count >> 1
	 * halfword transfers (an odd trailing byte would not be moved by
	 * these calls). */
	if (data->flags & MMC_DATA_READ)
		tmio_ioread16_rep(ctl + CTL_SD_DATA_PORT, buf, count >> 1);
	else
		tmio_iowrite16_rep(ctl + CTL_SD_DATA_PORT, buf, count >> 1);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host, &flags);

	/* Segment drained - step to the next scatter-gather entry. */
	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);

	return;
}
213
/*
 * Data-end interrupt: the data phase of the current request finished.
 * Record the transfer count, disable the data-phase IRQs, acknowledge a
 * hardware-generated CMD12 stop (the only stop supported) and complete
 * the request.
 */
static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
	void __iomem *ctl = host->ctl;
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		pr_debug("Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*FIXME - other drivers allow an optional stop command of any given type
	 * which we dont do, as the chip can auto generate them.
	 * Perhaps we can be smarter about when to use auto CMD12 and
	 * only issue the auto request when we know this is the desired
	 * stop command, allowing fallback to the stop command the
	 * upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ)
		disable_mmc_irqs(ctl, TMIO_MASK_READOP);
	else
		disable_mmc_irqs(ctl, TMIO_MASK_WRITEOP);

	if (stop) {
		/* Only the hardware-issued CMD12 stop is supported; any
		 * other stop request indicates a driver bug. */
		if (stop->opcode == 12 && !stop->arg)
			tmio_iowrite16(0x000, ctl + CTL_STOP_INTERNAL_ACTION);
		else
			BUG();
	}

	tmio_mmc_finish_request(host);
}
258
/*
 * Command-complete interrupt: read back the response registers, fix up
 * their layout for the MMC core, record timeout/CRC errors from @stat,
 * then either enable the data-phase IRQs or finish the request.
 */
static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
	unsigned int stat)
{
	void __iomem *ctl = host->ctl, *addr;
	struct mmc_command *cmd = host->cmd;
	int i;

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		return;
	}

	host->cmd = NULL;

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	/* Read the four response registers in reverse: resp[3] receives the
	 * first register. */
	for (i = 3, addr = ctl + CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = tmio_ioread32(addr);

	if (cmd->flags & MMC_RSP_136) {
		/* Long response: shift the 120 significant bits up by one
		 * byte, pulling in the top byte of the next word. */
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		/* Short response: the value landed in resp[3] (first
		 * register, see loop above); move it to resp[0] where the
		 * core expects it.  NOTE(review): MMC_RSP_R3 is a flag mask,
		 * so this matches any short response with a response
		 * present - that looks intentional here, but confirm. */
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && !cmd->error) {
		if (host->data->flags & MMC_DATA_READ)
			enable_mmc_irqs(ctl, TMIO_MASK_READOP);
		else
			enable_mmc_irqs(ctl, TMIO_MASK_WRITEOP);
	} else {
		tmio_mmc_finish_request(host);
	}

	return;
}
310
311
/*
 * Main interrupt handler.  Reads the status and mask registers, then
 * services card-detect, command-complete, PIO-data and data-end events,
 * re-reading status until no unmasked bits remain.  Always reports the
 * IRQ as handled.
 */
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	void __iomem *ctl = host->ctl;
	unsigned int ireg, irq_mask, status;

	pr_debug("MMC IRQ begin\n");

	status = tmio_ioread32(ctl + CTL_STATUS);
	irq_mask = tmio_ioread32(ctl + CTL_IRQ_MASK);
	ireg = status & TMIO_MASK_IRQ & ~irq_mask;

	pr_debug_status(status);
	pr_debug_status(ireg);

	if (!ireg) {
		/* Nothing we recognise is pending: mask whatever raised the
		 * line so a stuck status bit cannot storm us. */
		disable_mmc_irqs(ctl, status & ~irq_mask);

		pr_debug("tmio_mmc: Spurious irq, disabling! "
			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
		pr_debug_status(status);

		goto out;
	}

	while (ireg) {
		/* Card insert / remove attempts */
		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
			ack_mmc_irqs(ctl, TMIO_STAT_CARD_INSERT |
				TMIO_STAT_CARD_REMOVE);
			mmc_detect_change(host->mmc, 0);
		}

		/* CRC and other errors */
/*		if (ireg & TMIO_STAT_ERR_IRQ)
 *			handled |= tmio_error_irq(host, irq, stat);
 */

		/* Command completion */
		if (ireg & TMIO_MASK_CMD) {
			ack_mmc_irqs(ctl, TMIO_MASK_CMD);
			tmio_mmc_cmd_irq(host, status);
		}

		/* Data transfer */
		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
			ack_mmc_irqs(ctl, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
			tmio_mmc_pio_irq(host);
		}

		/* Data transfer completion */
		if (ireg & TMIO_STAT_DATAEND) {
			ack_mmc_irqs(ctl, TMIO_STAT_DATAEND);
			tmio_mmc_data_irq(host);
		}

		/* Check status - keep going until we've handled it all */
		status = tmio_ioread32(ctl + CTL_STATUS);
		irq_mask = tmio_ioread32(ctl + CTL_IRQ_MASK);
		ireg = status & TMIO_MASK_IRQ & ~irq_mask;

		pr_debug("Status at end of loop: %08x\n", status);
		pr_debug_status(status);
	}
	pr_debug("MMC IRQ end\n");

out:
	return IRQ_HANDLED;
}
381
382static int tmio_mmc_start_data(struct tmio_mmc_host *host,
383 struct mmc_data *data)
384{
385 void __iomem *ctl = host->ctl;
386
387 pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
388 data->blksz, data->blocks);
389
390 /* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
391 if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
392 printk(KERN_ERR "%s: %d byte block unsupported in 4 bit mode\n",
393 mmc_hostname(host->mmc), data->blksz);
394 return -EINVAL;
395 }
396
397 tmio_mmc_init_sg(host, data);
398 host->data = data;
399
400 /* Set transfer length / blocksize */
401 tmio_iowrite16(data->blksz, ctl + CTL_SD_XFER_LEN);
402 tmio_iowrite16(data->blocks, ctl + CTL_XFER_BLK_COUNT);
403
404 return 0;
405}
406
407/* Process requests from the MMC layer */
408static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
409{
410 struct tmio_mmc_host *host = mmc_priv(mmc);
411 int ret;
412
413 if (host->mrq)
414 pr_debug("request not null\n");
415
416 host->mrq = mrq;
417
418 if (mrq->data) {
419 ret = tmio_mmc_start_data(host, mrq->data);
420 if (ret)
421 goto fail;
422 }
423
424 ret = tmio_mmc_start_command(host, mrq->cmd);
425
426 if (!ret)
427 return;
428
429fail:
430 mrq->cmd->error = ret;
431 mmc_request_done(mmc, mrq);
432}
433
/* Set MMC clock / power.
 * Note: This controller uses a simple divider scheme therefore it cannot
 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
 * MMC wont run that fast, it has to be clocked at 12MHz which is the next
 * slowest setting.
 */
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	void __iomem *cnf = host->cnf;
	void __iomem *ctl = host->ctl;

	if (ios->clock)
		tmio_mmc_set_clock(host, ios->clock);

	/* Power sequence - OFF -> ON -> UP */
	switch (ios->power_mode) {
	case MMC_POWER_OFF: /* power down SD bus */
		tmio_iowrite8(0x00, cnf + CNF_PWR_CTL_2);
		tmio_mmc_clk_stop(host);
		break;
	case MMC_POWER_ON: /* power up SD bus */
		tmio_iowrite8(0x02, cnf + CNF_PWR_CTL_2);
		break;
	case MMC_POWER_UP: /* start bus clock */
		tmio_mmc_clk_start(host);
		break;
	}

	/* Select 1-bit or 4-bit bus via the card option register.
	 * NOTE(review): 0x80e0/0x00e0 values taken from the original
	 * driver; only the width bit (0x8000) visibly differs. */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		tmio_iowrite16(0x80e0, ctl + CTL_SD_MEM_CARD_OPT);
		break;
	case MMC_BUS_WIDTH_4:
		tmio_iowrite16(0x00e0, ctl + CTL_SD_MEM_CARD_OPT);
		break;
	}

	/* Let things settle. delay taken from winCE driver */
	udelay(140);
}
476
477static int tmio_mmc_get_ro(struct mmc_host *mmc)
478{
479 struct tmio_mmc_host *host = mmc_priv(mmc);
480 void __iomem *ctl = host->ctl;
481
482 return (tmio_ioread16(ctl + CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1;
483}
484
/* Host operations exported to the MMC core */
static struct mmc_host_ops tmio_mmc_ops = {
	.request	= tmio_mmc_request,
	.set_ios	= tmio_mmc_set_ios,
	.get_ro         = tmio_mmc_get_ro,
};
490
491#ifdef CONFIG_PM
492static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
493{
494 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
495 struct mmc_host *mmc = platform_get_drvdata(dev);
496 int ret;
497
498 ret = mmc_suspend_host(mmc, state);
499
500 /* Tell MFD core it can disable us now.*/
501 if (!ret && cell->disable)
502 cell->disable(dev);
503
504 return ret;
505}
506
/*
 * Resume: re-program the MMC/SD control register window (same
 * programming as in tmio_mmc_probe()), re-enable the MFD cell, then let
 * the MMC core resume the host.
 */
static int tmio_mmc_resume(struct platform_device *dev)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	void __iomem *cnf = host->cnf;
	int ret = 0;

	/* Enable the MMC/SD Control registers */
	tmio_iowrite16(SDCREN, cnf + CNF_CMD);
	tmio_iowrite32(dev->resource[0].start & 0xfffe, cnf + CNF_CTL_BASE);

	/* Tell the MFD core we are ready to be enabled */
	if (cell->enable) {
		ret = cell->enable(dev);
		if (ret)
			goto out;
	}

	mmc_resume_host(mmc);

out:
	return ret;
}
531#else
532#define tmio_mmc_suspend NULL
533#define tmio_mmc_resume NULL
534#endif
535
/*
 * Probe: map the ctl and cnf register windows, register an mmc_host with
 * the core and bring the cell to a known powered-down, reset state
 * before enabling interrupts.  Unwinds via the goto chain on error.
 */
static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct tmio_mmc_data *pdata;
	struct resource *res_ctl, *res_cnf;
	struct tmio_mmc_host *host;
	struct mmc_host *mmc;
	int ret = -ENOMEM;

	/* Expect exactly ctl mem, cnf mem and an IRQ resource. */
	if (dev->num_resources != 3)
		goto out;

	res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
	res_cnf = platform_get_resource(dev, IORESOURCE_MEM, 1);
	if (!res_ctl || !res_cnf) {
		ret = -EINVAL;
		goto out;
	}

	/* The platform must tell us the bus clock rate (hclk). */
	pdata = cell->driver_data;
	if (!pdata || !pdata->hclk) {
		ret = -EINVAL;
		goto out;
	}

	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
	if (!mmc)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	platform_set_drvdata(dev, mmc);

	host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
	if (!host->ctl)
		goto host_free;

	host->cnf = ioremap(res_cnf->start, resource_size(res_cnf));
	if (!host->cnf)
		goto unmap_ctl;

	mmc->ops = &tmio_mmc_ops;
	mmc->caps = MMC_CAP_4_BIT_DATA;
	mmc->f_max = pdata->hclk;
	mmc->f_min = mmc->f_max / 512;	/* lowest divider setting */
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Enable the MMC/SD Control registers */
	tmio_iowrite16(SDCREN, host->cnf + CNF_CMD);
	tmio_iowrite32(dev->resource[0].start & 0xfffe,
		host->cnf + CNF_CTL_BASE);

	/* Tell the MFD core we are ready to be enabled */
	if (cell->enable) {
		ret = cell->enable(dev);
		if (ret)
			goto unmap_cnf;
	}

	/* Disable SD power during suspend */
	tmio_iowrite8(0x01, host->cnf + CNF_PWR_CTL_3);

	/* The below is required but why? FIXME */
	tmio_iowrite8(0x1f, host->cnf + CNF_STOP_CLK_CTL);

	/* Power down SD bus */
	tmio_iowrite8(0x0, host->cnf + CNF_PWR_CTL_2);

	/* Start from a clean, clock-stopped, reset state. */
	tmio_mmc_clk_stop(host);
	reset(host);

	ret = platform_get_irq(dev, 0);
	if (ret >= 0)
		host->irq = ret;
	else
		goto unmap_cnf;

	/* Mask everything before the handler is installed. */
	disable_mmc_irqs(host->ctl, TMIO_MASK_ALL);

	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED, "tmio-mmc",
		host);
	if (ret)
		goto unmap_cnf;

	set_irq_type(host->irq, IRQ_TYPE_EDGE_FALLING);

	mmc_add_host(mmc);

	printk(KERN_INFO "%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
	       (unsigned long)host->ctl, host->irq);

	/* Unmask the IRQs we want to know about */
	enable_mmc_irqs(host->ctl, TMIO_MASK_IRQ);

	return 0;

unmap_cnf:
	iounmap(host->cnf);
unmap_ctl:
	iounmap(host->ctl);
host_free:
	mmc_free_host(mmc);
out:
	return ret;
}
641
642static int __devexit tmio_mmc_remove(struct platform_device *dev)
643{
644 struct mmc_host *mmc = platform_get_drvdata(dev);
645
646 platform_set_drvdata(dev, NULL);
647
648 if (mmc) {
649 struct tmio_mmc_host *host = mmc_priv(mmc);
650 mmc_remove_host(mmc);
Ian Molton4a489982008-07-15 16:02:21 +0100651 free_irq(host->irq, host);
652 iounmap(host->ctl);
653 iounmap(host->cnf);
Magnus Dammbedcc452009-03-11 21:59:03 +0900654 mmc_free_host(mmc);
Ian Molton4a489982008-07-15 16:02:21 +0100655 }
656
657 return 0;
658}
659
/* ------------------- device registration ----------------------- */

/* Platform driver glue; matched against the "tmio-mmc" MFD cell name. */
static struct platform_driver tmio_mmc_driver = {
	.driver = {
		.name = "tmio-mmc",
		.owner = THIS_MODULE,
	},
	.probe = tmio_mmc_probe,
	.remove = __devexit_p(tmio_mmc_remove),
	.suspend = tmio_mmc_suspend,
	.resume = tmio_mmc_resume,
};
672
673
/* Module entry point: register the platform driver. */
static int __init tmio_mmc_init(void)
{
	return platform_driver_register(&tmio_mmc_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit tmio_mmc_exit(void)
{
	platform_driver_unregister(&tmio_mmc_driver);
}

module_init(tmio_mmc_init);
module_exit(tmio_mmc_exit);

MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tmio-mmc");