/*
 * linux/drivers/mtd/onenand/omap2.c
 *
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/mach/flash.h>
#include <asm/arch/gpmc.h>
#include <asm/arch/onenand.h>
#include <asm/arch/gpio.h>
#include <asm/arch/pm.h>

#include <linux/dma-mapping.h>
#include <asm/dma-mapping.h>
#include <asm/arch/dma.h>

#include <asm/arch/board.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE		SZ_128K
#define ONENAND_BUFRAM_SIZE	(1024 * 5)

struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	int gpio_irq;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int freq);
};

static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

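/*
 * Wait for a OneNAND command to complete.  Resets are polled with a short
 * busy-wait; other non-read operations enable the interrupt pin and sleep
 * on the GPIO interrupt completion; reads poll the interrupt register.
 * Controller and ECC status are checked once the operation has finished.
 */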
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	unsigned int intr = 0;
	unsigned int ctrl;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING) {
		int i;

		for (i = 0; i < 20; i++) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if (!(intr & ONENAND_INT_RESET)) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = omap_get_gpio_datain(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (time_before(jiffies, timeout)) {
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
			       "Device is write protected!!!\n");
		return -EIO;
	}

	if (ctrl & 0xFE9F)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return mtd->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

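/*
 * OMAP3: read from the OneNAND BufferRAM.  Word-aligned transfers of at
 * least 384 bytes are done with system DMA; smaller or unaligned requests,
 * and DMA timeouts, fall back to a plain memcpy().
 */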
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

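/*
 * OMAP3: write to the OneNAND BufferRAM.  DMA is used for word-aligned
 * transfers of at least 384 bytes; small or unaligned buffers and writes
 * from interrupt context (panic_write) fall back to memcpy().
 */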
static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	/* check the mapping that was just created, not the fixed GPMC address */
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

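/*
 * OMAP2: read from the OneNAND BufferRAM.  The DMA path below is kept but
 * currently disabled by the "if (1 || ...)" test, so every transfer goes
 * through memcpy() until the PM implications of DMA are revisited.
 */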
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used. Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}

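/*
 * OMAP2: write to the OneNAND BufferRAM.  As with the read path, the DMA
 * code is currently disabled and the copy is done with memcpy().
 */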
static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used. Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	/* check the mapping that was just created, not the fixed GPMC address */
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}

#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

static struct platform_driver omap2_onenand_driver;

static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, c->freq);

	return ret;
}

int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}

static void __devexit omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

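/*
 * Probe: claim the GPMC chip select and I/O region, run the optional
 * board-specific setup hook, request the interrupt GPIO and DMA channel
 * when configured, scan the OneNAND chip and register the MTD device
 * (with partitions when the platform data provides them).
 */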
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = omap_request_gpio(c->gpio_irq)) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		omap_set_gpio_direction(c->gpio_irq, 1);

		if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq),
				     omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base);

	c->pdev = pdev;
	/* remember the partition table so remove() can undo the registration */
	c->parts = pdata->parts;
	c->mtd.name = pdev->dev.bus_id;
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	if (c->dma_channel >= 0) {
		struct onenand_chip *this = &c->onenand;

		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;

	switch ((c->onenand.version_id >> 4) & 0xf) {
	case 0:
		c->freq = 40;
		break;
	case 1:
		c->freq = 54;
		break;
	case 2:
		c->freq = 66;
		break;
	case 3:
		c->freq = 83;
		break;
	}

#ifdef CONFIG_MTD_PARTITIONS
	if (pdata->parts != NULL)
		r = add_mtd_partitions(&c->mtd, pdata->parts,
				       pdata->nr_parts);
	else
#endif
		r = add_mtd_device(&c->mtd);
	if (r < 0)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		omap_free_gpio(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c);

	return r;
}

static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	BUG_ON(c == NULL);

#ifdef CONFIG_MTD_PARTITIONS
	if (c->parts)
		del_mtd_partitions(&c->mtd);
	else
		del_mtd_device(&c->mtd);
#else
	del_mtd_device(&c->mtd);
#endif

	onenand_release(&c->mtd);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
		omap_free_gpio(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	kfree(c);

	return 0;
}

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= omap2_onenand_remove,
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap2_onenand_init(void)
{
	printk(KERN_INFO "OneNAND driver initializing\n");
	return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");