blob: 60bac8e6e9fa737bb5d280ea0bd7b08686fb3ff6 [file] [log] [blame]
/*
 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
 * Copyright © 2004 Micron Technology Inc.
 * Copyright © 2004 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <plat/dma.h>
#include <plat/gpmc.h>
#include <plat/nand.h>
Vimal Singh67ce04b2009-05-12 13:47:03 -070025
#define	DRIVER_NAME	"omap2-nand"

/*
 * Flags naming the individual parity bits of the 3-byte (22-bit) Hamming
 * ECC used by the GPMC ECC engine.  "Pne" is the even parity over groups
 * of n bits, "Pno" the corresponding odd parity.  The even bits occupy
 * the low half-word of the combined value, the odd bits the high half.
 */
#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)

/* TF(): collapse a masked bit-test to exactly 0 or 1 so it can be shifted
 * into position when assembling an ECC byte. */
#define TF(value)	(value ? 1 : 0)

/* Extractors placing each parity flag at its bit position within the
 * first ECC byte layout used by gen_true_ecc(). */
#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)

/* Same idea for the second ECC byte. */
#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)

/* And for the third ECC byte. */
#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)

/* Alternate ("_s") placements; declared here but not referenced in this
 * file — presumably kept for a different ECC byte ordering. */
#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)

#ifdef CONFIG_MTD_PARTITIONS
/* Partition parsers tried by parse_mtd_partitions() in probe. */
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
98
/*
 * Per-device driver state.  The mtd_info and nand_chip structures are
 * embedded, so container_of(mtd, struct omap_nand_info, mtd) recovers
 * this structure from the MTD callbacks.
 */
struct omap_nand_info {
	struct nand_hw_control		controller;
	struct omap_nand_platform_data	*pdata;
	struct mtd_info			mtd;
	struct mtd_partition		*parts;
	struct nand_chip		nand;
	struct platform_device		*pdev;

	int				gpmc_cs;	/* GPMC chip-select used by this NAND */
	unsigned long			phys_base;	/* physical base of the NAND I/O window */
	struct completion		comp;		/* signalled by the DMA callback */
	int				dma_ch;		/* DMA channel, or -1 if none requested */
};
112
113/**
Vimal Singh67ce04b2009-05-12 13:47:03 -0700114 * omap_hwcontrol - hardware specific access to control-lines
115 * @mtd: MTD device structure
116 * @cmd: command to device
117 * @ctrl:
118 * NAND_NCE: bit 0 -> don't care
119 * NAND_CLE: bit 1 -> Command Latch
120 * NAND_ALE: bit 2 -> Address Latch
121 *
122 * NOTE: boards may use different bits for these!!
123 */
124static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
125{
126 struct omap_nand_info *info = container_of(mtd,
127 struct omap_nand_info, mtd);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700128
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000129 if (cmd != NAND_CMD_NONE) {
130 if (ctrl & NAND_CLE)
131 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700132
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000133 else if (ctrl & NAND_ALE)
134 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
135
136 else /* NAND_NCE */
137 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700138 }
Vimal Singh67ce04b2009-05-12 13:47:03 -0700139}
140
141/**
vimal singh59e9c5a2009-07-13 16:26:24 +0530142 * omap_read_buf8 - read data from NAND controller into buffer
143 * @mtd: MTD device structure
144 * @buf: buffer to store date
145 * @len: number of bytes to read
146 */
147static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
148{
149 struct nand_chip *nand = mtd->priv;
150
151 ioread8_rep(nand->IO_ADDR_R, buf, len);
152}
153
154/**
155 * omap_write_buf8 - write buffer to NAND controller
156 * @mtd: MTD device structure
157 * @buf: data buffer
158 * @len: number of bytes to write
159 */
160static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
161{
162 struct omap_nand_info *info = container_of(mtd,
163 struct omap_nand_info, mtd);
164 u_char *p = (u_char *)buf;
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000165 u32 status = 0;
vimal singh59e9c5a2009-07-13 16:26:24 +0530166
167 while (len--) {
168 iowrite8(*p++, info->nand.IO_ADDR_W);
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000169 /* wait until buffer is available for write */
170 do {
171 status = gpmc_read_status(GPMC_STATUS_BUFFER);
172 } while (!status);
vimal singh59e9c5a2009-07-13 16:26:24 +0530173 }
174}
175
176/**
Vimal Singh67ce04b2009-05-12 13:47:03 -0700177 * omap_read_buf16 - read data from NAND controller into buffer
178 * @mtd: MTD device structure
179 * @buf: buffer to store date
180 * @len: number of bytes to read
181 */
182static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
183{
184 struct nand_chip *nand = mtd->priv;
185
vimal singh59e9c5a2009-07-13 16:26:24 +0530186 ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700187}
188
189/**
190 * omap_write_buf16 - write buffer to NAND controller
191 * @mtd: MTD device structure
192 * @buf: data buffer
193 * @len: number of bytes to write
194 */
195static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
196{
197 struct omap_nand_info *info = container_of(mtd,
198 struct omap_nand_info, mtd);
199 u16 *p = (u16 *) buf;
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000200 u32 status = 0;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700201 /* FIXME try bursts of writesw() or DMA ... */
202 len >>= 1;
203
204 while (len--) {
vimal singh59e9c5a2009-07-13 16:26:24 +0530205 iowrite16(*p++, info->nand.IO_ADDR_W);
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000206 /* wait until buffer is available for write */
207 do {
208 status = gpmc_read_status(GPMC_STATUS_BUFFER);
209 } while (!status);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700210 }
211}
vimal singh59e9c5a2009-07-13 16:26:24 +0530212
/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 *
 * Uses the GPMC prefetch (PFPW) engine to stream data, draining the FIFO
 * in 32-bit words.  Falls back to plain PIO if the engine is busy.
 */
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads: consume the unaligned head with PIO
	 * so the prefetch loop below only sees whole 32-bit words */
	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len);
		else
			omap_read_buf8(mtd, buf, len);
	} else {
		/* NOTE(review): this reset of p to buf discards the offset
		 * set up by the subpage branch above; looks only safe when
		 * len was already word-aligned — confirm against callers. */
		p = (u32 *) buf;
		do {
			/* FIFO count is in bytes; convert to 32-bit words. */
			r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			r_count = r_count >> 2;
			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);
		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}
258
/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Uses the GPMC prefetch engine in write-posting mode, feeding the FIFO
 * in 16-bit words.  Falls back to plain PIO if the engine is busy.
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t pref_count = 0, w_count = 0;
	int i = 0, ret = 0;
	u16 *p;

	/* take care of subpage writes: push a lone leading byte so the
	 * loop below only handles 16-bit words */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, buf, len);
		else
			omap_write_buf8(mtd, buf, len);
	} else {
		/* NOTE(review): like the read path, p is reset to buf here,
		 * dropping the +1 offset from the odd-length branch —
		 * confirm len is always even for prefetch writes. */
		p = (u16 *) buf;
		while (len) {
			/* free FIFO space in bytes; convert to 16-bit words */
			w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.IO_ADDR_W);
		}
		/* wait for data to be flushed out before resetting the prefetch */
		do {
			pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
		} while (pref_count);
		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}
305
vimal singhdfe32892009-07-13 16:29:16 +0530306/*
307 * omap_nand_dma_cb: callback on the completion of dma transfer
308 * @lch: logical channel
309 * @ch_satuts: channel status
310 * @data: pointer to completion data structure
311 */
312static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
313{
314 complete((struct completion *) data);
315}
316
/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 *
 * Returns 0 in all cases; when DMA cannot be used the transfer is
 * completed by CPU copy via the out_copy path instead.
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	uint32_t prefetch_status = 0;
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	dma_addr_t dma_addr;
	int ret;

	/* The fifo depth is 64 bytes. We have a sync at each frame and frame
	 * length is 64 bytes.
	 */
	int buf_len = len >> 6;

	/* vmalloc'd buffers have no single bus address; resolve to the
	 * underlying page, bailing out if the buffer crosses a page. */
	if (addr >= high_memory) {
		struct page *p1;

		if (((size_t)addr & PAGE_MASK) !=
			((size_t)(addr + len - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(addr);
		if (!p1)
			goto out_copy;
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	/* One side of the transfer is the fixed NAND data port (constant
	 * addressing), the other walks RAM (post-increment). */
	if (is_write) {
	    omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
	    omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
							dma_addr, 0, 0);
	    omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
					OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
	} else {
	    omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
	    omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
							dma_addr, 0, 0);
	    omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
					OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
	}
	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
	if (ret)
		/* PFPW engine is busy, use cpu copy method.
		 * NOTE(review): jumping to out_copy here leaves the buffer
		 * dma-mapped (no dma_unmap_single on this path) — confirm. */
		goto out_copy;

	init_completion(&info->comp);

	omap_start_dma(info->dma_ch);

	/* setup and start DMA using dma_addr */
	wait_for_completion(&info->comp);

	/* drain: wait until the prefetch engine has moved every byte */
	do {
		prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
	} while (prefetch_status);
	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);

	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
	return 0;

out_copy:
	/* CPU fallback: same data, PIO instead of DMA */
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}
vimal singhdfe32892009-07-13 16:29:16 +0530407
408/**
409 * omap_read_buf_dma_pref - read data from NAND controller into buffer
410 * @mtd: MTD device structure
411 * @buf: buffer to store date
412 * @len: number of bytes to read
413 */
414static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
415{
416 if (len <= mtd->oobsize)
417 omap_read_buf_pref(mtd, buf, len);
418 else
419 /* start transfer in DMA mode */
420 omap_nand_dma_transfer(mtd, buf, len, 0x0);
421}
422
423/**
424 * omap_write_buf_dma_pref - write buffer to NAND controller
425 * @mtd: MTD device structure
426 * @buf: data buffer
427 * @len: number of bytes to write
428 */
429static void omap_write_buf_dma_pref(struct mtd_info *mtd,
430 const u_char *buf, int len)
431{
432 if (len <= mtd->oobsize)
433 omap_write_buf_pref(mtd, buf, len);
434 else
435 /* start transfer in DMA mode */
Vimal Singhbdaefc42010-01-05 12:49:24 +0530436 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
vimal singhdfe32892009-07-13 16:29:16 +0530437}
438
/**
 * omap_verify_buf - Verify chip data against buffer
 * @mtd: MTD device structure
 * @buf: buffer containing the data to compare
 * @len: number of bytes to compare
 *
 * Compares in 16-bit words; an odd trailing byte is silently ignored
 * (len >>= 1).  Returns 0 on match, -EFAULT on the first mismatch.
 */
static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	u16 *p = (u16 *) buf;

	len >>= 1;
	while (len--) {
		if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
			return -EFAULT;
	}

	return 0;
}
459
460#ifdef CONFIG_MTD_NAND_OMAP_HWECC
Vimal Singh67ce04b2009-05-12 13:47:03 -0700461
/**
 * gen_true_ecc - This function will generate true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * This generated true ECC value can be used when correcting
 * data read from NAND flash memory core.
 *
 * Reassembles the three hardware ECC bytes into one 32-bit word of
 * parity flags (see the NAND_Ecc_* masks), then rewrites ecc_buf in
 * the canonical inverted bit layout used by omap_compare_ecc().
 * Operates in place on the 3-byte buffer.
 */
static void gen_true_ecc(u8 *ecc_buf)
{
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}
481
/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 *
 * Classic Hamming-code decode: the XOR of the two (normalised) ECCs is
 * unpacked into 24 syndrome bits; the number of set bits classifies the
 * event (0 = no error, 12 = correctable single-bit, anything else is
 * uncorrectable except the erased-page special case in default:).
 * NOTE: both ecc buffers are destroyed (normalised in place).
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint	i;
	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8	ecc_bit[24];
	u8	ecc_sum = 0;
	u8	find_bit = 0;
	uint	find_byte = 0;
	int	isEccFF;

	/* remember whether the stored ECC was all-0xFF (erased page) */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	/* undo the inversion applied by gen_true_ecc() */
	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	/* explode each ECC byte into single-bit arrays (LSB first) */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i]     = *ecc_data1 % 2;
		*ecc_data1	= *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i]	 = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i]	 = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i]     = *ecc_data2 % 2;
		*ecc_data2	 = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i]	 = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i]	 = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	/* build the 24-bit syndrome from the pairwise XORs */
	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 *  ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error: the odd syndrome bits address the
		 * failing byte, the even ones the failing bit within it */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 0;
	default:
		if (isEccFF) {
			/* erased page: stored ECC was 0xFFFFFF and the
			 * computed one is all zero — nothing to correct */
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}
607
/**
 * omap_correct_data - Compares the ECC read with HW generated ECC
 * @mtd: MTD device structure
 * @dat: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 *
 * Compares the ecc read from nand spare area with ECC registers values
 * and if ECC's mismatched, it will call 'omap_compare_ecc' for error
 * detection and correction.
 *
 * NOTE(review): returns 0 even when a bit was corrected; MTD convention
 * is to return the number of corrected bitflips — confirm callers.
 */
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
				u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	int blockCnt = 0, i = 0, ret = 0;

	/* Ex NAND_ECC_HW12_2048: four 512-byte sub-blocks per ECC step */
	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
			(info->nand.ecc.size  == 2048))
		blockCnt = 4;
	else
		blockCnt = 1;

	for (i = 0; i < blockCnt; i++) {
		/* 3 ECC bytes cover each 512-byte sub-block */
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
			if (ret < 0)
				return ret;
		}
		read_ecc += 3;
		calc_ecc += 3;
		dat      += 512;
	}
	return 0;
}
645
/**
 * omap_calculate_ecc - Generate non-inverted ECC bytes.
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 *
 * Using noninverted ECC can be considered ugly since writing a blank
 * page ie. padding will clear the ECC bytes. This is no problem as long
 * nobody is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 *
 * The ECC itself is accumulated by the GPMC hardware; this just reads
 * it back for the given chip-select.
 */
static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
				u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
}
665
666/**
667 * omap_enable_hwecc - This function enables the hardware ecc functionality
668 * @mtd: MTD device structure
669 * @mode: Read/Write mode
670 */
671static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
672{
673 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
674 mtd);
675 struct nand_chip *chip = mtd->priv;
676 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700677
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000678 gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700679}
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000680
Vimal Singh67ce04b2009-05-12 13:47:03 -0700681#endif
682
/**
 * omap_wait - wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND Chip structure
 *
 * Wait function is called during Program and erase operations and
 * the way it is called from MTD layer, we should wait till the NAND
 * chip is ready after the programming/erase operation has completed.
 *
 * Erase can take up to 400ms and program up to 20ms according to
 * general NAND and SmartMedia specs.
 *
 * Returns the last NAND status byte read; if the deadline expires
 * without NAND_STATUS_READY, that (not-ready) status is returned and
 * the caller interprets it as failure.
 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status = NAND_STATUS_FAIL, state = this->state;

	/* budget per the worst-case timings above */
	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	/* issue READ STATUS, then poll the data register */
	gpmc_nand_write(info->gpmc_cs,
			GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
	while (time_before(jiffies, timeo)) {
		status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}
	return status;
}
718
/**
 * omap_dev_ready - calls the platform specific dev_ready function
 * @mtd: MTD device structure
 *
 * Polls the GPMC IRQ status for the wait-pin bit (0x100).  If set,
 * acknowledges it; otherwise spins up to 0x1FF reads waiting for it.
 * NOTE(review): falls through to "return 1" (ready) even when the
 * bounded poll loop never saw the bit — confirm this is intentional.
 */
static int omap_dev_ready(struct mtd_info *mtd)
{
	unsigned int val = 0;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
	if ((val & 0x100) == 0x100) {
		/* Clear IRQ Interrupt */
		val |= 0x100;
		val &= ~(0x0);
		gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
	} else {
		unsigned int cnt = 0;
		while (cnt++ < 0x1FF) {
			if ((val & 0x100) == 0x100)
				return 0;
			val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
		}
	}

	return 1;
}
746
747static int __devinit omap_nand_probe(struct platform_device *pdev)
748{
749 struct omap_nand_info *info;
750 struct omap_nand_platform_data *pdata;
751 int err;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700752
753 pdata = pdev->dev.platform_data;
754 if (pdata == NULL) {
755 dev_err(&pdev->dev, "platform data missing\n");
756 return -ENODEV;
757 }
758
759 info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
760 if (!info)
761 return -ENOMEM;
762
763 platform_set_drvdata(pdev, info);
764
765 spin_lock_init(&info->controller.lock);
766 init_waitqueue_head(&info->controller.wq);
767
768 info->pdev = pdev;
769
770 info->gpmc_cs = pdata->cs;
Vimal Singh2f70a1e2010-02-15 10:03:33 -0800771 info->phys_base = pdata->phys_base;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700772
773 info->mtd.priv = &info->nand;
774 info->mtd.name = dev_name(&pdev->dev);
775 info->mtd.owner = THIS_MODULE;
776
Sukumar Ghoraid5ce2b62011-01-28 15:42:03 +0530777 info->nand.options = pdata->devsize;
Vimal Singh2f70a1e2010-02-15 10:03:33 -0800778 info->nand.options |= NAND_SKIP_BBTSCAN;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700779
780 /* NAND write protect off */
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000781 gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700782
783 if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
784 pdev->dev.driver->name)) {
785 err = -EBUSY;
Vimal Singh2f70a1e2010-02-15 10:03:33 -0800786 goto out_free_info;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700787 }
788
789 info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
790 if (!info->nand.IO_ADDR_R) {
791 err = -ENOMEM;
792 goto out_release_mem_region;
793 }
vimal singh59e9c5a2009-07-13 16:26:24 +0530794
Vimal Singh67ce04b2009-05-12 13:47:03 -0700795 info->nand.controller = &info->controller;
796
797 info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
798 info->nand.cmd_ctrl = omap_hwcontrol;
799
Vimal Singh67ce04b2009-05-12 13:47:03 -0700800 /*
801 * If RDY/BSY line is connected to OMAP then use the omap ready
802 * funcrtion and the generic nand_wait function which reads the status
803 * register after monitoring the RDY/BSY line.Otherwise use a standard
804 * chip delay which is slightly more than tR (AC Timing) of the NAND
805 * device and read status register until you get a failure or success
806 */
807 if (pdata->dev_ready) {
808 info->nand.dev_ready = omap_dev_ready;
809 info->nand.chip_delay = 0;
810 } else {
811 info->nand.waitfunc = omap_wait;
812 info->nand.chip_delay = 50;
813 }
814
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +0530815 switch (pdata->xfer_type) {
816 case NAND_OMAP_PREFETCH_POLLED:
vimal singh59e9c5a2009-07-13 16:26:24 +0530817 info->nand.read_buf = omap_read_buf_pref;
818 info->nand.write_buf = omap_write_buf_pref;
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +0530819 break;
vimal singhdfe32892009-07-13 16:29:16 +0530820
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +0530821 case NAND_OMAP_POLLED:
vimal singh59e9c5a2009-07-13 16:26:24 +0530822 if (info->nand.options & NAND_BUSWIDTH_16) {
823 info->nand.read_buf = omap_read_buf16;
824 info->nand.write_buf = omap_write_buf16;
825 } else {
826 info->nand.read_buf = omap_read_buf8;
827 info->nand.write_buf = omap_write_buf8;
828 }
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +0530829 break;
830
831 case NAND_OMAP_PREFETCH_DMA:
832 err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
833 omap_nand_dma_cb, &info->comp, &info->dma_ch);
834 if (err < 0) {
835 info->dma_ch = -1;
836 dev_err(&pdev->dev, "DMA request failed!\n");
837 goto out_release_mem_region;
838 } else {
839 omap_set_dma_dest_burst_mode(info->dma_ch,
840 OMAP_DMA_DATA_BURST_16);
841 omap_set_dma_src_burst_mode(info->dma_ch,
842 OMAP_DMA_DATA_BURST_16);
843
844 info->nand.read_buf = omap_read_buf_dma_pref;
845 info->nand.write_buf = omap_write_buf_dma_pref;
846 }
847 break;
848
849 default:
850 dev_err(&pdev->dev,
851 "xfer_type(%d) not supported!\n", pdata->xfer_type);
852 err = -EINVAL;
853 goto out_release_mem_region;
vimal singh59e9c5a2009-07-13 16:26:24 +0530854 }
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +0530855
vimal singh59e9c5a2009-07-13 16:26:24 +0530856 info->nand.verify_buf = omap_verify_buf;
857
Vimal Singh67ce04b2009-05-12 13:47:03 -0700858#ifdef CONFIG_MTD_NAND_OMAP_HWECC
859 info->nand.ecc.bytes = 3;
860 info->nand.ecc.size = 512;
861 info->nand.ecc.calculate = omap_calculate_ecc;
862 info->nand.ecc.hwctl = omap_enable_hwecc;
863 info->nand.ecc.correct = omap_correct_data;
864 info->nand.ecc.mode = NAND_ECC_HW;
865
Vimal Singh67ce04b2009-05-12 13:47:03 -0700866#else
867 info->nand.ecc.mode = NAND_ECC_SOFT;
868#endif
869
870 /* DIP switches on some boards change between 8 and 16 bit
871 * bus widths for flash. Try the other width if the first try fails.
872 */
873 if (nand_scan(&info->mtd, 1)) {
874 info->nand.options ^= NAND_BUSWIDTH_16;
875 if (nand_scan(&info->mtd, 1)) {
876 err = -ENXIO;
877 goto out_release_mem_region;
878 }
879 }
880
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +0530881
Vimal Singh67ce04b2009-05-12 13:47:03 -0700882#ifdef CONFIG_MTD_PARTITIONS
883 err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
884 if (err > 0)
885 add_mtd_partitions(&info->mtd, info->parts, err);
886 else if (pdata->parts)
887 add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
888 else
889#endif
890 add_mtd_device(&info->mtd);
891
892 platform_set_drvdata(pdev, &info->mtd);
893
894 return 0;
895
896out_release_mem_region:
897 release_mem_region(info->phys_base, NAND_IO_SIZE);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700898out_free_info:
899 kfree(info);
900
901 return err;
902}
903
904static int omap_nand_remove(struct platform_device *pdev)
905{
906 struct mtd_info *mtd = platform_get_drvdata(pdev);
Vimal Singhf35b6ed2010-01-05 16:01:08 +0530907 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
908 mtd);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700909
910 platform_set_drvdata(pdev, NULL);
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +0530911 if (info->dma_ch != -1)
vimal singhdfe32892009-07-13 16:29:16 +0530912 omap_free_dma(info->dma_ch);
913
Vimal Singh67ce04b2009-05-12 13:47:03 -0700914 /* Release NAND device, its internal structures and partitions */
915 nand_release(&info->mtd);
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000916 iounmap(info->nand.IO_ADDR_R);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700917 kfree(&info->mtd);
918 return 0;
919}
920
/* Platform-driver glue: binds on devices named DRIVER_NAME. */
static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

/* Module init: announce ourselves and register the platform driver. */
static int __init omap_nand_init(void)
{
	pr_info("%s driver initializing\n", DRIVER_NAME);

	return platform_driver_register(&omap_nand_driver);
}

/* Module exit: unregister; remove() runs for each bound device. */
static void __exit omap_nand_exit(void)
{
	platform_driver_unregister(&omap_nand_driver);
}

module_init(omap_nand_init);
module_exit(omap_nand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");