/*
 * Samsung S3C64XX/S5PC1XX OneNAND driver
 *
 * Copyright © 2008-2010 Samsung Electronics
 * Kyungmin Park <kyungmin.park@samsung.com>
 * Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Implementation:
 *	S3C64XX and S5PC100: emulate the pseudo BufferRAM
 *	S5PC110: use DMA
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>

#include <asm/mach/flash.h>
#include <plat/regs-onenand.h>

#include <linux/io.h>

enum soc_type {
	TYPE_S3C6400,
	TYPE_S3C6410,
	TYPE_S5PC100,
	TYPE_S5PC110,
};

#define ONENAND_ERASE_STATUS		0x00
#define ONENAND_MULTI_ERASE_SET		0x01
#define ONENAND_ERASE_START		0x03
#define ONENAND_UNLOCK_START		0x08
#define ONENAND_UNLOCK_END		0x09
#define ONENAND_LOCK_START		0x0A
#define ONENAND_LOCK_END		0x0B
#define ONENAND_LOCK_TIGHT_START	0x0C
#define ONENAND_LOCK_TIGHT_END		0x0D
#define ONENAND_UNLOCK_ALL		0x0E
#define ONENAND_OTP_ACCESS		0x12
#define ONENAND_SPARE_ACCESS_ONLY	0x13
#define ONENAND_MAIN_ACCESS_ONLY	0x14
#define ONENAND_ERASE_VERIFY		0x15
#define ONENAND_MAIN_SPARE_ACCESS	0x16
#define ONENAND_PIPELINE_READ		0x4000

#define MAP_00				(0x0)
#define MAP_01				(0x1)
#define MAP_10				(0x2)
#define MAP_11				(0x3)

#define S3C64XX_CMD_MAP_SHIFT		24
#define S5PC100_CMD_MAP_SHIFT		26

#define S3C6400_FBA_SHIFT		10
#define S3C6400_FPA_SHIFT		4
#define S3C6400_FSA_SHIFT		2

#define S3C6410_FBA_SHIFT		12
#define S3C6410_FPA_SHIFT		6
#define S3C6410_FSA_SHIFT		4

#define S5PC100_FBA_SHIFT		13
#define S5PC100_FPA_SHIFT		7
#define S5PC100_FSA_SHIFT		5

/* S5PC110 specific definitions */
#define S5PC110_DMA_SRC_ADDR		0x400
#define S5PC110_DMA_SRC_CFG		0x404
#define S5PC110_DMA_DST_ADDR		0x408
#define S5PC110_DMA_DST_CFG		0x40C
#define S5PC110_DMA_TRANS_SIZE		0x414
#define S5PC110_DMA_TRANS_CMD		0x418
#define S5PC110_DMA_TRANS_STATUS	0x41C
#define S5PC110_DMA_TRANS_DIR		0x420
#define S5PC110_INTC_DMA_CLR		0x1004
#define S5PC110_INTC_ONENAND_CLR	0x1008
#define S5PC110_INTC_DMA_MASK		0x1024
#define S5PC110_INTC_ONENAND_MASK	0x1028
#define S5PC110_INTC_DMA_PEND		0x1044
#define S5PC110_INTC_ONENAND_PEND	0x1048
#define S5PC110_INTC_DMA_STATUS		0x1064
#define S5PC110_INTC_ONENAND_STATUS	0x1068

#define S5PC110_INTC_DMA_TD		(1 << 24)
#define S5PC110_INTC_DMA_TE		(1 << 16)

#define S5PC110_DMA_CFG_SINGLE		(0x0 << 16)
#define S5PC110_DMA_CFG_4BURST		(0x2 << 16)
#define S5PC110_DMA_CFG_8BURST		(0x3 << 16)
#define S5PC110_DMA_CFG_16BURST		(0x4 << 16)

#define S5PC110_DMA_CFG_INC		(0x0 << 8)
#define S5PC110_DMA_CFG_CNT		(0x1 << 8)

#define S5PC110_DMA_CFG_8BIT		(0x0 << 0)
#define S5PC110_DMA_CFG_16BIT		(0x1 << 0)
#define S5PC110_DMA_CFG_32BIT		(0x2 << 0)

#define S5PC110_DMA_SRC_CFG_READ	(S5PC110_DMA_CFG_16BURST | \
					S5PC110_DMA_CFG_INC | \
					S5PC110_DMA_CFG_16BIT)
#define S5PC110_DMA_DST_CFG_READ	(S5PC110_DMA_CFG_16BURST | \
					S5PC110_DMA_CFG_INC | \
					S5PC110_DMA_CFG_32BIT)
#define S5PC110_DMA_SRC_CFG_WRITE	(S5PC110_DMA_CFG_16BURST | \
					S5PC110_DMA_CFG_INC | \
					S5PC110_DMA_CFG_32BIT)
#define S5PC110_DMA_DST_CFG_WRITE	(S5PC110_DMA_CFG_16BURST | \
					S5PC110_DMA_CFG_INC | \
					S5PC110_DMA_CFG_16BIT)

#define S5PC110_DMA_TRANS_CMD_TDC	(0x1 << 18)
#define S5PC110_DMA_TRANS_CMD_TEC	(0x1 << 16)
#define S5PC110_DMA_TRANS_CMD_TR	(0x1 << 0)

#define S5PC110_DMA_TRANS_STATUS_TD	(0x1 << 18)
#define S5PC110_DMA_TRANS_STATUS_TB	(0x1 << 17)
#define S5PC110_DMA_TRANS_STATUS_TE	(0x1 << 16)

#define S5PC110_DMA_DIR_READ		0x0
#define S5PC110_DMA_DIR_WRITE		0x1

struct s3c_onenand {
	struct mtd_info		*mtd;
	struct platform_device	*pdev;
	enum soc_type		type;
	void __iomem		*base;
	struct resource		*base_res;
	void __iomem		*ahb_addr;
	struct resource		*ahb_res;
	int			bootram_command;
	void __iomem		*page_buf;
	void __iomem		*oob_buf;
	unsigned int		(*mem_addr)(int fba, int fpa, int fsa);
	unsigned int		(*cmd_map)(unsigned int type, unsigned int val);
	void __iomem		*dma_addr;
	struct resource		*dma_res;
	unsigned long		phys_base;
	struct completion	complete;
#ifdef CONFIG_MTD_PARTITIONS
	struct mtd_partition *parts;
#endif
};

#define CMD_MAP_00(dev, addr)		(dev->cmd_map(MAP_00, ((addr) << 1)))
#define CMD_MAP_01(dev, mem_addr)	(dev->cmd_map(MAP_01, (mem_addr)))
#define CMD_MAP_10(dev, mem_addr)	(dev->cmd_map(MAP_10, (mem_addr)))
#define CMD_MAP_11(dev, addr)		(dev->cmd_map(MAP_11, ((addr) << 2)))

static struct s3c_onenand *onenand;

#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL, };
#endif

static inline int s3c_read_reg(int offset)
{
	return readl(onenand->base + offset);
}

static inline void s3c_write_reg(int value, int offset)
{
	writel(value, onenand->base + offset);
}

static inline int s3c_read_cmd(unsigned int cmd)
{
	return readl(onenand->ahb_addr + cmd);
}

static inline void s3c_write_cmd(int value, unsigned int cmd)
{
	writel(value, onenand->ahb_addr + cmd);
}

#ifdef SAMSUNG_DEBUG
static void s3c_dump_reg(void)
{
	int i;

	for (i = 0; i < 0x400; i += 0x40) {
		printk(KERN_INFO "0x%08X: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			(unsigned int) onenand->base + i,
			s3c_read_reg(i), s3c_read_reg(i + 0x10),
			s3c_read_reg(i + 0x20), s3c_read_reg(i + 0x30));
	}
}
#endif

static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
{
	return (type << S3C64XX_CMD_MAP_SHIFT) | val;
}

static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val)
{
	return (type << S5PC100_CMD_MAP_SHIFT) | val;
}

static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
{
	return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
		(fsa << S3C6400_FSA_SHIFT);
}

static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
{
	return (fba << S3C6410_FBA_SHIFT) | (fpa << S3C6410_FPA_SHIFT) |
		(fsa << S3C6410_FSA_SHIFT);
}

static unsigned int s5pc100_mem_addr(int fba, int fpa, int fsa)
{
	return (fba << S5PC100_FBA_SHIFT) | (fpa << S5PC100_FPA_SHIFT) |
		(fsa << S5PC100_FSA_SHIFT);
}
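
/*
 * Illustrative example (not used by the code): with the S3C6410 shifts
 * above, fba=2, fpa=5, fsa=1 gives
 *
 *	mem_addr = (2 << 12) | (5 << 6) | (1 << 4) = 0x2150
 *
 * and CMD_MAP_01() turns that into the AHB offset
 *
 *	(MAP_01 << S3C64XX_CMD_MAP_SHIFT) | 0x2150 = 0x01002150,
 *
 * which s3c_read_cmd()/s3c_write_cmd() add to onenand->ahb_addr.
 */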

static void s3c_onenand_reset(void)
{
	unsigned long timeout = 0x10000;
	int stat;

	s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
	while (timeout--) {
		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
		if (stat & RST_CMP)
			break;
	}
	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);

	/* Clear interrupt */
	s3c_write_reg(0x0, INT_ERR_ACK_OFFSET);
	/* Clear the ECC status */
	s3c_write_reg(0x0, ECC_ERR_STAT_OFFSET);
}

static unsigned short s3c_onenand_readw(void __iomem *addr)
{
	struct onenand_chip *this = onenand->mtd->priv;
	struct device *dev = &onenand->pdev->dev;
	int reg = addr - this->base;
	int word_addr = reg >> 1;
	int value;

	/* Registers emulated for the probe-time ID and config reads */
	switch (reg) {
	case ONENAND_REG_MANUFACTURER_ID:
		return s3c_read_reg(MANUFACT_ID_OFFSET);
	case ONENAND_REG_DEVICE_ID:
		return s3c_read_reg(DEVICE_ID_OFFSET);
	case ONENAND_REG_VERSION_ID:
		return s3c_read_reg(FLASH_VER_ID_OFFSET);
	case ONENAND_REG_DATA_BUFFER_SIZE:
		return s3c_read_reg(DATA_BUF_SIZE_OFFSET);
	case ONENAND_REG_TECHNOLOGY:
		return s3c_read_reg(TECH_OFFSET);
	case ONENAND_REG_SYS_CFG1:
		return s3c_read_reg(MEM_CFG_OFFSET);

	/* Queried while checking the unlock-all status */
	case ONENAND_REG_CTRL_STATUS:
		return 0;

	case ONENAND_REG_WP_STATUS:
		return ONENAND_WP_US;

	default:
		break;
	}

	/* BootRAM access control */
	if ((unsigned int) addr < ONENAND_DATARAM && onenand->bootram_command) {
		if (word_addr == 0)
			return s3c_read_reg(MANUFACT_ID_OFFSET);
		if (word_addr == 1)
			return s3c_read_reg(DEVICE_ID_OFFSET);
		if (word_addr == 2)
			return s3c_read_reg(FLASH_VER_ID_OFFSET);
	}

	value = s3c_read_cmd(CMD_MAP_11(onenand, word_addr)) & 0xffff;
	dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
		 word_addr, value);
	return value;
}

static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
{
	struct onenand_chip *this = onenand->mtd->priv;
	struct device *dev = &onenand->pdev->dev;
	unsigned int reg = addr - this->base;
	unsigned int word_addr = reg >> 1;

	/* Registers emulated for the probe-time configuration writes */
	switch (reg) {
	case ONENAND_REG_SYS_CFG1:
		s3c_write_reg(value, MEM_CFG_OFFSET);
		return;

	case ONENAND_REG_START_ADDRESS1:
	case ONENAND_REG_START_ADDRESS2:
		return;

	/* Lock/lock-tight/unlock/unlock_all */
	case ONENAND_REG_START_BLOCK_ADDRESS:
		return;

	default:
		break;
	}

	/* BootRAM access control */
	if ((unsigned int)addr < ONENAND_DATARAM) {
		if (value == ONENAND_CMD_READID) {
			onenand->bootram_command = 1;
			return;
		}
		if (value == ONENAND_CMD_RESET) {
			s3c_write_reg(ONENAND_MEM_RESET_COLD, MEM_RESET_OFFSET);
			onenand->bootram_command = 0;
			return;
		}
	}

	dev_info(dev, "%s: Illegal access at reg 0x%x, value 0x%x\n", __func__,
		 word_addr, value);

	s3c_write_cmd(value, CMD_MAP_11(onenand, word_addr));
}

static int s3c_onenand_wait(struct mtd_info *mtd, int state)
{
	struct device *dev = &onenand->pdev->dev;
	unsigned int flags = INT_ACT;
	unsigned int stat, ecc;
	unsigned long timeout;

	switch (state) {
	case FL_READING:
		flags |= BLK_RW_CMP | LOAD_CMP;
		break;
	case FL_WRITING:
		flags |= BLK_RW_CMP | PGM_CMP;
		break;
	case FL_ERASING:
		flags |= BLK_RW_CMP | ERS_CMP;
		break;
	case FL_LOCKING:
		flags |= BLK_RW_CMP;
		break;
	default:
		break;
	}

	/* 20 msec is more than enough for any single operation */
	timeout = jiffies + msecs_to_jiffies(20);
	while (time_before(jiffies, timeout)) {
		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
		if (stat & flags)
			break;

		if (state != FL_READING)
			cond_resched();
	}
	/* Re-read to get the correct interrupt status in the timeout case */
	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);

	/*
	 * The spec says to check the controller status first, but to get
	 * correct information during a power-off recovery (POR) test the
	 * ECC status has to be read first.
	 */
	if (stat & LOAD_CMP) {
		ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
		if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
			dev_info(dev, "%s: ECC error = 0x%04x\n", __func__,
				 ecc);
			mtd->ecc_stats.failed++;
			return -EBADMSG;
		}
	}

	if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
		dev_info(dev, "%s: controller error = 0x%04x\n", __func__,
			 stat);
		if (stat & LOCKED_BLK)
			dev_info(dev, "%s: it's locked error = 0x%04x\n",
				 __func__, stat);

		return -EIO;
	}

	return 0;
}

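/*
 * Issue a command by driving the command-mapped address space: main and
 * spare data are transferred word by word through CMD_MAP_01 accesses,
 * while block operations (erase, unlock-all) are triggered through
 * CMD_MAP_10.  Data is staged in the two emulated BufferRAMs held in
 * onenand->page_buf/oob_buf, so the generic OneNAND layer keeps its
 * usual BufferRAM view of the device.
 */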
static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
			       size_t len)
{
	struct onenand_chip *this = mtd->priv;
	unsigned int *m, *s;
	int fba, fpa, fsa = 0;
	unsigned int mem_addr, cmd_map_01, cmd_map_10;
	int i, mcount, scount;
	int index;

	fba = (int) (addr >> this->erase_shift);
	fpa = (int) (addr >> this->page_shift);
	fpa &= this->page_mask;

	mem_addr = onenand->mem_addr(fba, fpa, fsa);
	cmd_map_01 = CMD_MAP_01(onenand, mem_addr);
	cmd_map_10 = CMD_MAP_10(onenand, mem_addr);

	switch (cmd) {
	case ONENAND_CMD_READ:
	case ONENAND_CMD_READOOB:
	case ONENAND_CMD_BUFFERRAM:
		ONENAND_SET_NEXT_BUFFERRAM(this);
		/* fall through */
	default:
		break;
	}

	index = ONENAND_CURRENT_BUFFERRAM(this);

	/* Emulate two BufferRAMs and access them with 4-byte pointers */
	m = (unsigned int *) onenand->page_buf;
	s = (unsigned int *) onenand->oob_buf;

	if (index) {
		m += (this->writesize >> 2);
		s += (mtd->oobsize >> 2);
	}

	mcount = mtd->writesize >> 2;
	scount = mtd->oobsize >> 2;

	switch (cmd) {
	case ONENAND_CMD_READ:
		/* Main */
		for (i = 0; i < mcount; i++)
			*m++ = s3c_read_cmd(cmd_map_01);
		return 0;

	case ONENAND_CMD_READOOB:
		s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);
		/* Main */
		for (i = 0; i < mcount; i++)
			*m++ = s3c_read_cmd(cmd_map_01);

		/* Spare */
		for (i = 0; i < scount; i++)
			*s++ = s3c_read_cmd(cmd_map_01);

		s3c_write_reg(0, TRANS_SPARE_OFFSET);
		return 0;

	case ONENAND_CMD_PROG:
		/* Main */
		for (i = 0; i < mcount; i++)
			s3c_write_cmd(*m++, cmd_map_01);
		return 0;

	case ONENAND_CMD_PROGOOB:
		s3c_write_reg(TSRF, TRANS_SPARE_OFFSET);

		/* Main - dummy write */
		for (i = 0; i < mcount; i++)
			s3c_write_cmd(0xffffffff, cmd_map_01);

		/* Spare */
		for (i = 0; i < scount; i++)
			s3c_write_cmd(*s++, cmd_map_01);

		s3c_write_reg(0, TRANS_SPARE_OFFSET);
		return 0;

	case ONENAND_CMD_UNLOCK_ALL:
		s3c_write_cmd(ONENAND_UNLOCK_ALL, cmd_map_10);
		return 0;

	case ONENAND_CMD_ERASE:
		s3c_write_cmd(ONENAND_ERASE_START, cmd_map_10);
		return 0;

	default:
		break;
	}

	return 0;
}

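/*
 * Return a pointer into the emulated BufferRAM: two slots are laid out
 * back to back in page_buf (main) and oob_buf (spare), selected by
 * ONENAND_CURRENT_BUFFERRAM().
 */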
static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
{
	struct onenand_chip *this = mtd->priv;
	int index = ONENAND_CURRENT_BUFFERRAM(this);
	unsigned char *p;

	if (area == ONENAND_DATARAM) {
		p = (unsigned char *) onenand->page_buf;
		if (index == 1)
			p += this->writesize;
	} else {
		p = (unsigned char *) onenand->oob_buf;
		if (index == 1)
			p += mtd->oobsize;
	}

	return p;
}

static int onenand_read_bufferram(struct mtd_info *mtd, int area,
				  unsigned char *buffer, int offset,
				  size_t count)
{
	unsigned char *p;

	p = s3c_get_bufferram(mtd, area);
	memcpy(buffer, p + offset, count);
	return 0;
}

static int onenand_write_bufferram(struct mtd_info *mtd, int area,
				   const unsigned char *buffer, int offset,
				   size_t count)
{
	unsigned char *p;

	p = s3c_get_bufferram(mtd, area);
	memcpy(p + offset, buffer, count);
	return 0;
}

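/*
 * S5PC110 DMA engine.  A transfer is started by programming the source
 * and destination address/config registers, the transfer size and
 * direction, and then setting the TR bit in TRANS_CMD; completion is
 * reported through the TD status bit (TE on error).  s5pc110_dma_ops
 * defaults to the polling implementation below and is switched to the
 * interrupt-driven one in probe when an IRQ resource is available.
 */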
static int (*s5pc110_dma_ops)(void *dst, void *src, size_t count, int direction);

static int s5pc110_dma_poll(void *dst, void *src, size_t count, int direction)
{
	void __iomem *base = onenand->dma_addr;
	int status;
	unsigned long timeout;

	writel(src, base + S5PC110_DMA_SRC_ADDR);
	writel(dst, base + S5PC110_DMA_DST_ADDR);

	if (direction == S5PC110_DMA_DIR_READ) {
		writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
		writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
	} else {
		writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
		writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
	}

	writel(count, base + S5PC110_DMA_TRANS_SIZE);
	writel(direction, base + S5PC110_DMA_TRANS_DIR);

	writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);

	/*
	 * The spec gives no exact timeout value; in practice the transfer
	 * completes in well under 1 msec, so 20 msecs is a safe bound.
	 */
	timeout = jiffies + msecs_to_jiffies(20);

	do {
		status = readl(base + S5PC110_DMA_TRANS_STATUS);
		if (status & S5PC110_DMA_TRANS_STATUS_TE) {
			writel(S5PC110_DMA_TRANS_CMD_TEC,
					base + S5PC110_DMA_TRANS_CMD);
			return -EIO;
		}
	} while (!(status & S5PC110_DMA_TRANS_STATUS_TD) &&
		 time_before(jiffies, timeout));

	writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);

	return 0;
}

static irqreturn_t s5pc110_onenand_irq(int irq, void *data)
{
	void __iomem *base = onenand->dma_addr;
	int status, cmd = 0;

	status = readl(base + S5PC110_INTC_DMA_STATUS);

	if (likely(status & S5PC110_INTC_DMA_TD))
		cmd = S5PC110_DMA_TRANS_CMD_TDC;

	if (unlikely(status & S5PC110_INTC_DMA_TE))
		cmd = S5PC110_DMA_TRANS_CMD_TEC;

	writel(cmd, base + S5PC110_DMA_TRANS_CMD);
	writel(status, base + S5PC110_INTC_DMA_CLR);

	if (!onenand->complete.done)
		complete(&onenand->complete);

	return IRQ_HANDLED;
}

static int s5pc110_dma_irq(void *dst, void *src, size_t count, int direction)
{
	void __iomem *base = onenand->dma_addr;
	int status;

	status = readl(base + S5PC110_INTC_DMA_MASK);
	if (status) {
		status &= ~(S5PC110_INTC_DMA_TD | S5PC110_INTC_DMA_TE);
		writel(status, base + S5PC110_INTC_DMA_MASK);
	}

	writel(src, base + S5PC110_DMA_SRC_ADDR);
	writel(dst, base + S5PC110_DMA_DST_ADDR);

	if (direction == S5PC110_DMA_DIR_READ) {
		writel(S5PC110_DMA_SRC_CFG_READ, base + S5PC110_DMA_SRC_CFG);
		writel(S5PC110_DMA_DST_CFG_READ, base + S5PC110_DMA_DST_CFG);
	} else {
		writel(S5PC110_DMA_SRC_CFG_WRITE, base + S5PC110_DMA_SRC_CFG);
		writel(S5PC110_DMA_DST_CFG_WRITE, base + S5PC110_DMA_DST_CFG);
	}

	writel(count, base + S5PC110_DMA_TRANS_SIZE);
	writel(direction, base + S5PC110_DMA_TRANS_DIR);

	writel(S5PC110_DMA_TRANS_CMD_TR, base + S5PC110_DMA_TRANS_CMD);

	wait_for_completion_timeout(&onenand->complete, msecs_to_jiffies(20));

	return 0;
}

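/*
 * Read a page straight from the controller's DataRAM into the caller's
 * buffer via DMA when the buffer is word-aligned and a full page is
 * requested (vmalloc addresses are handled with dma_map_page()); fall
 * back to a plain memcpy otherwise.
 */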
static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
		unsigned char *buffer, int offset, size_t count)
{
	struct onenand_chip *this = mtd->priv;
	void __iomem *p;
	void *buf = (void *) buffer;
	dma_addr_t dma_src, dma_dst;
	int err, ofs, page_dma = 0;
	struct device *dev = &onenand->pdev->dev;

	p = this->base + area;
	if (ONENAND_CURRENT_BUFFERRAM(this)) {
		if (area == ONENAND_DATARAM)
			p += this->writesize;
		else
			p += mtd->oobsize;
	}

	if (offset & 3 || (size_t) buf & 3 ||
	    !onenand->dma_addr || count != mtd->writesize)
		goto normal;

	/* Handle vmalloc address */
	if (buf >= high_memory) {
		struct page *page;

		if (((size_t) buf & PAGE_MASK) !=
		    ((size_t) (buf + count - 1) & PAGE_MASK))
			goto normal;
		page = vmalloc_to_page(buf);
		if (!page)
			goto normal;

		/* Page offset */
		ofs = ((size_t) buf & ~PAGE_MASK);
		page_dma = 1;

		/* DMA routine */
		dma_src = onenand->phys_base + (p - this->base);
		dma_dst = dma_map_page(dev, page, ofs, count, DMA_FROM_DEVICE);
	} else {
		/* DMA routine */
		dma_src = onenand->phys_base + (p - this->base);
		dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
	}
	if (dma_mapping_error(dev, dma_dst)) {
		dev_err(dev, "Couldn't map a %zu byte buffer for DMA\n", count);
		goto normal;
	}
	err = s5pc110_dma_ops((void *) dma_dst, (void *) dma_src,
			count, S5PC110_DMA_DIR_READ);

	if (page_dma)
		dma_unmap_page(dev, dma_dst, count, DMA_FROM_DEVICE);
	else
		dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);

	if (!err)
		return 0;

normal:
	if (count != mtd->writesize) {
		/* Copy the BufferRAM to memory to prevent unaligned access */
		memcpy(this->page_buf, p, mtd->writesize);
		p = this->page_buf + offset;
	}

	memcpy(buffer, p, count);

	return 0;
}

static int s5pc110_chip_probe(struct mtd_info *mtd)
{
	/* Nothing to do here for now; just report success */
	return 0;
}

static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
{
	unsigned int flags = INT_ACT | LOAD_CMP;
	unsigned int stat;
	unsigned long timeout;

	/* 20 msec is more than enough for the load to complete */
	timeout = jiffies + msecs_to_jiffies(20);
	while (time_before(jiffies, timeout)) {
		stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
		if (stat & flags)
			break;
	}
	/* Re-read to get the correct interrupt status in the timeout case */
	stat = s3c_read_reg(INT_ERR_STAT_OFFSET);
	s3c_write_reg(stat, INT_ERR_ACK_OFFSET);

	if (stat & LD_FAIL_ECC_ERR) {
		s3c_onenand_reset();
		return ONENAND_BBT_READ_ERROR;
	}

	if (stat & LOAD_CMP) {
		int ecc = s3c_read_reg(ECC_ERR_STAT_OFFSET);
		if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
			s3c_onenand_reset();
			return ONENAND_BBT_READ_ERROR;
		}
	}

	return 0;
}

static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
{
	struct onenand_chip *this = mtd->priv;
	struct device *dev = &onenand->pdev->dev;
	unsigned int block, end;
	int tmp;

	end = this->chipsize >> this->erase_shift;

	for (block = 0; block < end; block++) {
		unsigned int mem_addr = onenand->mem_addr(block, 0, 0);
		tmp = s3c_read_cmd(CMD_MAP_01(onenand, mem_addr));

		if (s3c_read_reg(INT_ERR_STAT_OFFSET) & LOCKED_BLK) {
			dev_err(dev, "block %d is write-protected!\n", block);
			s3c_write_reg(LOCKED_BLK, INT_ERR_ACK_OFFSET);
		}
	}
}

static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
				    size_t len, int cmd)
{
	struct onenand_chip *this = mtd->priv;
	int start, end, start_mem_addr, end_mem_addr;

	start = ofs >> this->erase_shift;
	start_mem_addr = onenand->mem_addr(start, 0, 0);
	end = start + (len >> this->erase_shift) - 1;
	end_mem_addr = onenand->mem_addr(end, 0, 0);

	if (cmd == ONENAND_CMD_LOCK) {
		s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(onenand,
							     start_mem_addr));
		s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(onenand,
							   end_mem_addr));
	} else {
		s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(onenand,
							       start_mem_addr));
		s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(onenand,
							     end_mem_addr));
	}

	this->wait(mtd, FL_LOCKING);
}

static void s3c_unlock_all(struct mtd_info *mtd)
{
	struct onenand_chip *this = mtd->priv;
	loff_t ofs = 0;
	size_t len = this->chipsize;

	if (this->options & ONENAND_HAS_UNLOCK_ALL) {
		/* Write unlock command */
		this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);

		/* No need to check return value */
		this->wait(mtd, FL_LOCKING);

		/* Workaround for all block unlock in DDP */
		if (!ONENAND_IS_DDP(this)) {
			s3c_onenand_check_lock_status(mtd);
			return;
		}

		/* All blocks on another chip */
		ofs = this->chipsize >> 1;
		len = this->chipsize >> 1;
	}

	s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);

	s3c_onenand_check_lock_status(mtd);
}

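/*
 * Hook up the SoC-specific helpers.  S3C64XX and S5PC100 replace the
 * generic register/BufferRAM accessors with the emulation above, while
 * S5PC110 keeps the generic OneNAND routines and only overrides
 * read_bufferram (DMA) and chip_probe.
 */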
static void s3c_onenand_setup(struct mtd_info *mtd)
{
	struct onenand_chip *this = mtd->priv;

	onenand->mtd = mtd;

	if (onenand->type == TYPE_S3C6400) {
		onenand->mem_addr = s3c6400_mem_addr;
		onenand->cmd_map = s3c64xx_cmd_map;
	} else if (onenand->type == TYPE_S3C6410) {
		onenand->mem_addr = s3c6410_mem_addr;
		onenand->cmd_map = s3c64xx_cmd_map;
	} else if (onenand->type == TYPE_S5PC100) {
		onenand->mem_addr = s5pc100_mem_addr;
		onenand->cmd_map = s5pc1xx_cmd_map;
	} else if (onenand->type == TYPE_S5PC110) {
		/* Use generic onenand functions */
		this->read_bufferram = s5pc110_read_bufferram;
		this->chip_probe = s5pc110_chip_probe;
		return;
	} else {
		BUG();
	}

	this->read_word = s3c_onenand_readw;
	this->write_word = s3c_onenand_writew;

	this->wait = s3c_onenand_wait;
	this->bbt_wait = s3c_onenand_bbt_wait;
	this->unlock_all = s3c_unlock_all;
	this->command = s3c_onenand_command;

	this->read_bufferram = onenand_read_bufferram;
	this->write_bufferram = onenand_write_bufferram;
}

static int s3c_onenand_probe(struct platform_device *pdev)
{
	struct onenand_platform_data *pdata;
	struct onenand_chip *this;
	struct mtd_info *mtd;
	struct resource *r;
	int size, err;

	pdata = pdev->dev.platform_data;
	/* No need to check pdata; the platform data is optional */

	size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
	mtd = kzalloc(size, GFP_KERNEL);
	if (!mtd) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
	if (!onenand) {
		err = -ENOMEM;
		goto onenand_fail;
	}

	this = (struct onenand_chip *) &mtd[1];
	mtd->priv = this;
	mtd->dev.parent = &pdev->dev;
	mtd->owner = THIS_MODULE;
	onenand->pdev = pdev;
	onenand->type = platform_get_device_id(pdev)->driver_data;

	s3c_onenand_setup(mtd);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no memory resource defined\n");
		err = -ENOENT;
		goto resource_failed;
	}

	onenand->base_res = request_mem_region(r->start, resource_size(r),
					       pdev->name);
	if (!onenand->base_res) {
		dev_err(&pdev->dev, "failed to request memory resource\n");
		err = -EBUSY;
		goto resource_failed;
	}

	onenand->base = ioremap(r->start, resource_size(r));
	if (!onenand->base) {
		dev_err(&pdev->dev, "failed to map memory resource\n");
		err = -EFAULT;
		goto ioremap_failed;
	}
	/* Set onenand_chip also */
	this->base = onenand->base;

	/* Use runtime badblock check */
	this->options |= ONENAND_SKIP_UNLOCK_CHECK;

	if (onenand->type != TYPE_S5PC110) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!r) {
			dev_err(&pdev->dev, "no buffer memory resource defined\n");
			err = -ENOENT;
			goto ahb_resource_failed;
		}

		onenand->ahb_res = request_mem_region(r->start, resource_size(r),
						      pdev->name);
		if (!onenand->ahb_res) {
			dev_err(&pdev->dev, "failed to request buffer memory resource\n");
			err = -EBUSY;
			goto ahb_resource_failed;
		}

		onenand->ahb_addr = ioremap(r->start, resource_size(r));
		if (!onenand->ahb_addr) {
			dev_err(&pdev->dev, "failed to map buffer memory resource\n");
			err = -EINVAL;
			goto ahb_ioremap_failed;
		}

		/* Allocate 4KiB BufferRAM */
		onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
		if (!onenand->page_buf) {
			err = -ENOMEM;
			goto page_buf_fail;
		}

		/* Allocate 128 bytes of SpareRAM */
		onenand->oob_buf = kzalloc(128, GFP_KERNEL);
		if (!onenand->oob_buf) {
			err = -ENOMEM;
			goto oob_buf_fail;
		}

		/* S3C doesn't handle subpage write */
		mtd->subpage_sft = 0;
		this->subpagesize = mtd->writesize;

	} else { /* S5PC110 */
		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!r) {
			dev_err(&pdev->dev, "no dma memory resource defined\n");
			err = -ENOENT;
			goto dma_resource_failed;
		}

		onenand->dma_res = request_mem_region(r->start, resource_size(r),
						      pdev->name);
		if (!onenand->dma_res) {
			dev_err(&pdev->dev, "failed to request dma memory resource\n");
			err = -EBUSY;
			goto dma_resource_failed;
		}

		onenand->dma_addr = ioremap(r->start, resource_size(r));
		if (!onenand->dma_addr) {
			dev_err(&pdev->dev, "failed to map dma memory resource\n");
			err = -EINVAL;
			goto dma_ioremap_failed;
		}

		onenand->phys_base = onenand->base_res->start;

		s5pc110_dma_ops = s5pc110_dma_poll;
		/* Interrupt support */
		r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		if (r) {
			init_completion(&onenand->complete);
			s5pc110_dma_ops = s5pc110_dma_irq;
			err = request_irq(r->start, s5pc110_onenand_irq,
					  IRQF_SHARED, "onenand", &onenand);
			if (err) {
				dev_err(&pdev->dev, "failed to get irq\n");
				goto scan_failed;
			}
		}
	}

	if (onenand_scan(mtd, 1)) {
		err = -EFAULT;
		goto scan_failed;
	}

	if (onenand->type != TYPE_S5PC110) {
		/* S3C doesn't handle subpage write */
		mtd->subpage_sft = 0;
		this->subpagesize = mtd->writesize;
	}

	if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
		dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");

#ifdef CONFIG_MTD_PARTITIONS
	err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
	if (err > 0)
		add_mtd_partitions(mtd, onenand->parts, err);
	else if (err <= 0 && pdata && pdata->parts)
		add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
	else
#endif
		err = add_mtd_device(mtd);

	platform_set_drvdata(pdev, mtd);

	return 0;

scan_failed:
	if (onenand->dma_addr)
		iounmap(onenand->dma_addr);
dma_ioremap_failed:
	if (onenand->dma_res)
		release_mem_region(onenand->dma_res->start,
				   resource_size(onenand->dma_res));
	kfree(onenand->oob_buf);
oob_buf_fail:
	kfree(onenand->page_buf);
page_buf_fail:
	if (onenand->ahb_addr)
		iounmap(onenand->ahb_addr);
ahb_ioremap_failed:
	if (onenand->ahb_res)
		release_mem_region(onenand->ahb_res->start,
				   resource_size(onenand->ahb_res));
dma_resource_failed:
ahb_resource_failed:
	iounmap(onenand->base);
ioremap_failed:
	if (onenand->base_res)
		release_mem_region(onenand->base_res->start,
				   resource_size(onenand->base_res));
resource_failed:
	kfree(onenand);
onenand_fail:
	kfree(mtd);
	return err;
}

static int __devexit s3c_onenand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);

	onenand_release(mtd);
	if (onenand->ahb_addr)
		iounmap(onenand->ahb_addr);
	if (onenand->ahb_res)
		release_mem_region(onenand->ahb_res->start,
				   resource_size(onenand->ahb_res));
	if (onenand->dma_addr)
		iounmap(onenand->dma_addr);
	if (onenand->dma_res)
		release_mem_region(onenand->dma_res->start,
				   resource_size(onenand->dma_res));

	iounmap(onenand->base);
	release_mem_region(onenand->base_res->start,
			   resource_size(onenand->base_res));

	platform_set_drvdata(pdev, NULL);
	kfree(onenand->oob_buf);
	kfree(onenand->page_buf);
	kfree(onenand);
	kfree(mtd);
	return 0;
}

static int s3c_pm_ops_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct onenand_chip *this = mtd->priv;

	this->wait(mtd, FL_PM_SUSPENDED);
	return 0;
}

static int s3c_pm_ops_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct onenand_chip *this = mtd->priv;

	this->unlock_all(mtd);
	return 0;
}

static const struct dev_pm_ops s3c_pm_ops = {
	.suspend	= s3c_pm_ops_suspend,
	.resume		= s3c_pm_ops_resume,
};

static struct platform_device_id s3c_onenand_driver_ids[] = {
	{
		.name		= "s3c6400-onenand",
		.driver_data	= TYPE_S3C6400,
	}, {
		.name		= "s3c6410-onenand",
		.driver_data	= TYPE_S3C6410,
	}, {
		.name		= "s5pc100-onenand",
		.driver_data	= TYPE_S5PC100,
	}, {
		.name		= "s5pc110-onenand",
		.driver_data	= TYPE_S5PC110,
	}, { },
};
MODULE_DEVICE_TABLE(platform, s3c_onenand_driver_ids);

static struct platform_driver s3c_onenand_driver = {
	.driver		= {
		.name	= "samsung-onenand",
		.pm	= &s3c_pm_ops,
	},
	.id_table	= s3c_onenand_driver_ids,
	.probe		= s3c_onenand_probe,
	.remove		= __devexit_p(s3c_onenand_remove),
};

static int __init s3c_onenand_init(void)
{
	return platform_driver_register(&s3c_onenand_driver);
}

static void __exit s3c_onenand_exit(void)
{
	platform_driver_unregister(&s3c_onenand_driver);
}

module_init(s3c_onenand_init);
module_exit(s3c_onenand_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
MODULE_DESCRIPTION("Samsung OneNAND controller support");