/*
 * linux/drivers/mtd/onenand/omap2.c
 *
 * OneNAND driver for OMAP2 / OMAP3
 *
 * Copyright © 2005-2006 Nokia Corporation
 *
 * Author: Jarkko Lavinen <jarkko.lavinen@nokia.com> and Juha Yrjölä
 * IRQ and DMA support written by Timo Teras
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include <asm/mach/flash.h>
#include <mach/gpmc.h>
#include <mach/onenand.h>
#include <mach/gpio.h>
#include <mach/pm.h>

#include <mach/dma.h>

#include <mach/board.h>

#define DRIVER_NAME "omap2-onenand"

#define ONENAND_IO_SIZE		SZ_128K
#define ONENAND_BUFRAM_SIZE	(1024 * 5)

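/*
 * Per-device driver state: GPMC chip select and mapped OneNAND window,
 * optional interrupt GPIO and DMA channel, plus the MTD/OneNAND core
 * structures and the board-supplied timing setup hook.
 */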
struct omap2_onenand {
	struct platform_device *pdev;
	int gpmc_cs;
	unsigned long phys_base;
	int gpio_irq;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct onenand_chip onenand;
	struct completion irq_done;
	struct completion dma_done;
	int dma_channel;
	int freq;
	int (*setup)(void __iomem *base, int freq);
};

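/* System DMA completion callback: signals the BufferRAM read/write waiters */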
static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
{
	struct omap2_onenand *c = data;

	complete(&c->dma_done);
}

static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
{
	struct omap2_onenand *c = dev_id;

	complete(&c->irq_done);

	return IRQ_HANDLED;
}

static inline unsigned short read_reg(struct omap2_onenand *c, int reg)
{
	return readw(c->onenand.base + reg);
}

static inline void write_reg(struct omap2_onenand *c, unsigned short value,
			     int reg)
{
	writew(value, c->onenand.base + reg);
}

static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
	printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
	       msg, state, ctrl, intr);
}

static void wait_warn(char *msg, int state, unsigned int ctrl,
		      unsigned int intr)
{
	printk(KERN_WARNING "onenand_wait: %s! state %d ctrl 0x%04x "
	       "intr 0x%04x\n", msg, state, ctrl, intr);
}

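/*
 * Wait for a OneNAND command to complete.
 *
 * FL_RESETING is handled by polling the interrupt register for up to ~20 us.
 * For other non-read states the chip's INT output (routed to a GPIO) is
 * enabled and waited for with a 20 ms timeout, retried up to three times as
 * long as the controller still reports the operation as ongoing.  Reads are
 * polled with the interrupt output disabled.  Finally the interrupt, controller
 * and ECC status registers are checked and the MTD ECC statistics updated.
 */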
static int omap2_onenand_wait(struct mtd_info *mtd, int state)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	unsigned int intr = 0;
	unsigned int ctrl;
	unsigned long timeout;
	u32 syscfg;

	if (state == FL_RESETING) {
		int i;

		for (i = 0; i < 20; i++) {
			udelay(1);
			intr = read_reg(c, ONENAND_REG_INTERRUPT);
			if (intr & ONENAND_INT_MASTER)
				break;
		}
		ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
		if (ctrl & ONENAND_CTRL_ERROR) {
			wait_err("controller error", state, ctrl, intr);
			return -EIO;
		}
		if (!(intr & ONENAND_INT_RESET)) {
			wait_err("timeout", state, ctrl, intr);
			return -EIO;
		}
		return 0;
	}

	if (state != FL_READING) {
		int result;

		/* Turn interrupts on */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
			syscfg |= ONENAND_SYS_CFG1_IOBE;
			write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
			if (cpu_is_omap34xx())
				/* Add a delay to let GPIO settle */
				syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		}

		INIT_COMPLETION(c->irq_done);
		if (c->gpio_irq) {
			result = gpio_get_value(c->gpio_irq);
			if (result == -1) {
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				wait_err("gpio error", state, ctrl, intr);
				return -EIO;
			}
		} else
			result = 0;
		if (result == 0) {
			int retry_cnt = 0;
retry:
			result = wait_for_completion_timeout(&c->irq_done,
						    msecs_to_jiffies(20));
			if (result == 0) {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3)
						goto retry;
					intr = read_reg(c,
							ONENAND_REG_INTERRUPT);
					wait_err("timeout", state, ctrl, intr);
					return -EIO;
				}
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if ((intr & ONENAND_INT_MASTER) == 0)
					wait_warn("timeout", state, ctrl, intr);
			}
		}
	} else {
		int retry_cnt = 0;

		/* Turn interrupts off */
		syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
		syscfg &= ~ONENAND_SYS_CFG1_IOBE;
		write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);

		timeout = jiffies + msecs_to_jiffies(20);
		while (1) {
			if (time_before(jiffies, timeout)) {
				intr = read_reg(c, ONENAND_REG_INTERRUPT);
				if (intr & ONENAND_INT_MASTER)
					break;
			} else {
				/* Timeout after 20ms */
				ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
				if (ctrl & ONENAND_CTRL_ONGO) {
					/*
					 * The operation seems to be still going
					 * so give it some more time.
					 */
					retry_cnt += 1;
					if (retry_cnt < 3) {
						timeout = jiffies +
							  msecs_to_jiffies(20);
						continue;
					}
				}
				break;
			}
		}
	}

	intr = read_reg(c, ONENAND_REG_INTERRUPT);
	ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);

	if (intr & ONENAND_INT_READ) {
		int ecc = read_reg(c, ONENAND_REG_ECC_STATUS);

		if (ecc) {
			unsigned int addr1, addr8;

			addr1 = read_reg(c, ONENAND_REG_START_ADDRESS1);
			addr8 = read_reg(c, ONENAND_REG_START_ADDRESS8);
			if (ecc & ONENAND_ECC_2BIT_ALL) {
				printk(KERN_ERR "onenand_wait: ECC error = "
				       "0x%04x, addr1 %#x, addr8 %#x\n",
				       ecc, addr1, addr8);
				mtd->ecc_stats.failed++;
				return -EBADMSG;
			} else if (ecc & ONENAND_ECC_1BIT_ALL) {
				printk(KERN_NOTICE "onenand_wait: correctable "
				       "ECC error = 0x%04x, addr1 %#x, "
				       "addr8 %#x\n", ecc, addr1, addr8);
				mtd->ecc_stats.corrected++;
			}
		}
	} else if (state == FL_READING) {
		wait_err("timeout", state, ctrl, intr);
		return -EIO;
	}

	if (ctrl & ONENAND_CTRL_ERROR) {
		wait_err("controller error", state, ctrl, intr);
		if (ctrl & ONENAND_CTRL_LOCK)
			printk(KERN_ERR "onenand_wait: "
					"Device is write protected!!!\n");
		return -EIO;
	}

	if (ctrl & 0xFE9F)
		wait_warn("unexpected controller status", state, ctrl, intr);

	return 0;
}

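/*
 * The chip has two BufferRAM banks; when BufferRAM1 is the currently selected
 * one, the data and spare areas sit writesize/oobsize bytes further into the
 * mapped window.
 */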
static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;

	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			return mtd->writesize;
		if (area == ONENAND_SPARERAM)
			return mtd->oobsize;
	}

	return 0;
}

#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)

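/*
 * OMAP3 BufferRAM access: use system DMA for word-aligned transfers of at
 * least 384 bytes and fall back to memcpy for small or unaligned buffers,
 * when called from interrupt context (panic_write), when a highmem/vmalloc
 * buffer crosses a page boundary or cannot be translated, or when the DMA
 * transfer times out.
 */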
static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	size_t xtra;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	xtra = count & 3;
	if (xtra) {
		count -= xtra;
		memcpy(buf + count, this->base + bram_offset + count, xtra);
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		goto out_copy;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(buf, this->base + bram_offset, count);
	return 0;
}

static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;
	unsigned long timeout;
	void *buf = (void *)buffer;
	volatile unsigned *done;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
		goto out_copy;

	/* panic_write() may be in an interrupt context */
	if (in_interrupt())
		goto out_copy;

	if (buf >= high_memory) {
		struct page *p1;

		if (((size_t)buf & PAGE_MASK) !=
		    ((size_t)(buf + count - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(buf);
		if (!p1)
			goto out_copy;
		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
	}

	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count >> 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);

	timeout = jiffies + msecs_to_jiffies(20);
	done = &c->dma_done.done;
	while (time_before(jiffies, timeout))
		if (*done)
			break;

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	if (!*done) {
		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
		goto out_copy;
	}

	return 0;

out_copy:
	memcpy(this->base + bram_offset, buf, count);
	return 0;
}

#else

int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)

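/*
 * OMAP2 BufferRAM access.  The DMA code is kept for reference but currently
 * forced off (note the "1 ||" below) until the power-management implications
 * have been revisited, so these paths always fall back to memcpy.
 */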
static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
					unsigned char *buffer, int offset,
					size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used. Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy(buffer, (__force void *)(this->base + bram_offset),
		       count);
		return 0;
	}

	dma_src = c->phys_base + bram_offset;
	dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
				     count / 4, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);

	return 0;
}

static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
					 const unsigned char *buffer,
					 int offset, size_t count)
{
	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
	struct onenand_chip *this = mtd->priv;
	dma_addr_t dma_src, dma_dst;
	int bram_offset;

	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
	/* DMA is not used. Revisit PM requirements before enabling it. */
	if (1 || (c->dma_channel < 0) ||
	    ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
	    (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
		memcpy((__force void *)(this->base + bram_offset), buffer,
		       count);
		return 0;
	}

	dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
				 DMA_TO_DEVICE);
	dma_dst = c->phys_base + bram_offset;
	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
		dev_err(&c->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n",
			count);
		return -1;
	}

	omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
				     count / 2, 1, 0, 0, 0);
	omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				dma_src, 0, 0);
	omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
				 dma_dst, 0, 0);

	INIT_COMPLETION(c->dma_done);
	omap_start_dma(c->dma_channel);
	wait_for_completion(&c->dma_done);

	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);

	return 0;
}

#else

int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
				 unsigned char *buffer, int offset,
				 size_t count);

int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
				  const unsigned char *buffer,
				  int offset, size_t count);

#endif

static struct platform_driver omap2_onenand_driver;

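/*
 * Re-run the board-supplied timing setup (c->setup) for every device bound to
 * this driver; omap2_onenand_rephase() is the entry point, presumably invoked
 * when the interface clocking changes.  Only the synchronous timings are
 * touched, nothing DMA-related is reconfigured here.
 */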
static int __adjust_timing(struct device *dev, void *data)
{
	int ret = 0;
	struct omap2_onenand *c;

	c = dev_get_drvdata(dev);

	BUG_ON(c->setup == NULL);

	/* DMA is not in use so this is all that is needed */
	/* Revisit for OMAP3! */
	ret = c->setup(c->onenand.base, c->freq);

	return ret;
}

int omap2_onenand_rephase(void)
{
	return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
				      NULL, __adjust_timing);
}

static void omap2_onenand_shutdown(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	/* With certain content in the buffer RAM, the OMAP boot ROM code
	 * can recognize the flash chip incorrectly. Zero it out before
	 * soft reset.
	 */
	memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}

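/*
 * Probe expects the board code to have registered a DRIVER_NAME platform
 * device whose platform_data is a struct omap_onenand_platform_data.  A rough
 * sketch (the cs/gpio_irq/dma_channel values and the board_* names below are
 * invented, and only the fields this driver dereferences are shown; a
 * dma_channel < 0 selects PIO and makes the IRQ GPIO unused):
 *
 *	static struct omap_onenand_platform_data board_onenand_data = {
 *		.cs		= 0,
 *		.gpio_irq	= 65,
 *		.dma_channel	= -1,
 *		.onenand_setup	= board_onenand_setup,
 *		.parts		= board_onenand_parts,
 *		.nr_parts	= ARRAY_SIZE(board_onenand_parts),
 *	};
 */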
static int __devinit omap2_onenand_probe(struct platform_device *pdev)
{
	struct omap_onenand_platform_data *pdata;
	struct omap2_onenand *c;
	int r;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	init_completion(&c->irq_done);
	init_completion(&c->dma_done);
	c->gpmc_cs = pdata->cs;
	c->gpio_irq = pdata->gpio_irq;
	c->dma_channel = pdata->dma_channel;
	if (c->dma_channel < 0) {
		/* if -1, don't use DMA */
		c->gpio_irq = 0;
	}

	r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
	if (r < 0) {
		dev_err(&pdev->dev, "Cannot request GPMC CS\n");
		goto err_kfree;
	}

	if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
			       pdev->dev.driver->name) == NULL) {
		dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
			"size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
		r = -EBUSY;
		goto err_free_cs;
	}
	c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
	if (c->onenand.base == NULL) {
		r = -ENOMEM;
		goto err_release_mem_region;
	}

	if (pdata->onenand_setup != NULL) {
		r = pdata->onenand_setup(c->onenand.base, c->freq);
		if (r < 0) {
			dev_err(&pdev->dev, "Onenand platform setup failed: "
				"%d\n", r);
			goto err_iounmap;
		}
		c->setup = pdata->onenand_setup;
	}

	if (c->gpio_irq) {
		if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
			dev_err(&pdev->dev, "Failed to request GPIO%d for "
				"OneNAND\n", c->gpio_irq);
			goto err_iounmap;
		}
		gpio_direction_input(c->gpio_irq);

		if ((r = request_irq(gpio_to_irq(c->gpio_irq),
				     omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
				     pdev->dev.driver->name, c)) < 0)
			goto err_release_gpio;
	}

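	/*
	 * If platform data asked for DMA (dma_channel >= 0), allocate an OMAP
	 * system DMA channel and configure it for non-posted writes with
	 * packed, burst-of-8 transfers; if allocation fails, fall back to PIO.
	 */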
	if (c->dma_channel >= 0) {
		r = omap_request_dma(0, pdev->dev.driver->name,
				     omap2_onenand_dma_cb, (void *) c,
				     &c->dma_channel);
		if (r == 0) {
			omap_set_dma_write_mode(c->dma_channel,
						OMAP_DMA_WRITE_NON_POSTED);
			omap_set_dma_src_data_pack(c->dma_channel, 1);
			omap_set_dma_src_burst_mode(c->dma_channel,
						    OMAP_DMA_DATA_BURST_8);
			omap_set_dma_dest_data_pack(c->dma_channel, 1);
			omap_set_dma_dest_burst_mode(c->dma_channel,
						     OMAP_DMA_DATA_BURST_8);
		} else {
			dev_info(&pdev->dev,
				 "failed to allocate DMA for OneNAND, "
				 "using PIO instead\n");
			c->dma_channel = -1;
		}
	}

	dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
		 "base %p\n", c->gpmc_cs, c->phys_base,
		 c->onenand.base);

	c->pdev = pdev;
	c->mtd.name = dev_name(&pdev->dev);
	c->mtd.priv = &c->onenand;
	c->mtd.owner = THIS_MODULE;

	c->mtd.dev.parent = &pdev->dev;

	if (c->dma_channel >= 0) {
		struct onenand_chip *this = &c->onenand;

		this->wait = omap2_onenand_wait;
		if (cpu_is_omap34xx()) {
			this->read_bufferram = omap3_onenand_read_bufferram;
			this->write_bufferram = omap3_onenand_write_bufferram;
		} else {
			this->read_bufferram = omap2_onenand_read_bufferram;
			this->write_bufferram = omap2_onenand_write_bufferram;
		}
	}

	if ((r = onenand_scan(&c->mtd, 1)) < 0)
		goto err_release_dma;

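	/*
	 * Decode the synchronous read frequency (in MHz) from the chip's
	 * version register; it is handed back to the board timing setup hook
	 * when the interface is rephased.
	 */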
	switch ((c->onenand.version_id >> 4) & 0xf) {
	case 0:
		c->freq = 40;
		break;
	case 1:
		c->freq = 54;
		break;
	case 2:
		c->freq = 66;
		break;
	case 3:
		c->freq = 83;
		break;
	}

#ifdef CONFIG_MTD_PARTITIONS
	if (pdata->parts != NULL)
		r = add_mtd_partitions(&c->mtd, pdata->parts,
				       pdata->nr_parts);
	else
#endif
		r = add_mtd_device(&c->mtd);
	if (r < 0)
		goto err_release_onenand;

	platform_set_drvdata(pdev, c);

	return 0;

err_release_onenand:
	onenand_release(&c->mtd);
err_release_dma:
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	if (c->gpio_irq)
		free_irq(gpio_to_irq(c->gpio_irq), c);
err_release_gpio:
	if (c->gpio_irq)
		gpio_free(c->gpio_irq);
err_iounmap:
	iounmap(c->onenand.base);
err_release_mem_region:
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
err_free_cs:
	gpmc_cs_free(c->gpmc_cs);
err_kfree:
	kfree(c);

	return r;
}

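/*
 * Tear down in roughly the reverse order of probe; the BufferRAM is also
 * cleared (via the shutdown hook) so the boot ROM does not misidentify the
 * chip on the next reset.
 */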
static int __devexit omap2_onenand_remove(struct platform_device *pdev)
{
	struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);

	BUG_ON(c == NULL);

#ifdef CONFIG_MTD_PARTITIONS
	if (c->parts)
		del_mtd_partitions(&c->mtd);
	else
		del_mtd_device(&c->mtd);
#else
	del_mtd_device(&c->mtd);
#endif

	onenand_release(&c->mtd);
	if (c->dma_channel != -1)
		omap_free_dma(c->dma_channel);
	omap2_onenand_shutdown(pdev);
	platform_set_drvdata(pdev, NULL);
	if (c->gpio_irq) {
		free_irq(gpio_to_irq(c->gpio_irq), c);
		gpio_free(c->gpio_irq);
	}
	iounmap(c->onenand.base);
	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
	kfree(c);

	return 0;
}

static struct platform_driver omap2_onenand_driver = {
	.probe		= omap2_onenand_probe,
	.remove		= __devexit_p(omap2_onenand_remove),
	.shutdown	= omap2_onenand_shutdown,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap2_onenand_init(void)
{
	printk(KERN_INFO "OneNAND driver initializing\n");
	return platform_driver_register(&omap2_onenand_driver);
}

static void __exit omap2_onenand_exit(void)
{
	platform_driver_unregister(&omap2_onenand_driver);
}

module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");