/*
 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
 * Copyright © 2004 Micron Technology Inc.
 * Copyright © 2004 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <plat/dma.h>
#include <plat/gpmc.h>
#include <plat/nand.h>

#define DRIVER_NAME	"omap2-nand"

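/*
 * Parity bit names for the 3-byte-per-512-byte Hamming ECC handled below:
 * the "e" flags are the even parity bits (bits 0-11 of the word packed by
 * gen_true_ecc()), the "o" flags the odd parity bits (bits 16-27). The P*()
 * and P*_s() helpers regroup those bits into the individual ECC bytes.
 */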
#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)

#define TF(value)	(value ? 1 : 0)

#define P2048e(a)	(TF(a & NAND_Ecc_P2048e) << 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o) << 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e) << 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o) << 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e) << 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o) << 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e) << 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o) << 7)

#define P8e(a)		(TF(a & NAND_Ecc_P8e) << 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o) << 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e) << 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o) << 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e) << 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o) << 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e) << 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o) << 7)

#define P128e(a)	(TF(a & NAND_Ecc_P128e) << 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o) << 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e) << 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o) << 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e) << 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o) << 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e) << 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o) << 7)

#define P8e_s(a)	(TF(a & NAND_Ecc_P8e) << 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o) << 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e) << 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o) << 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e) << 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o) << 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e) << 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o) << 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o) << 1)

#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
static int use_prefetch = 1;

/* "modprobe ... use_prefetch=0" etc */
module_param(use_prefetch, bool, 0);
MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
static int use_dma = 1;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
#else
static const int use_dma;
#endif
#else
const int use_prefetch;
static const int use_dma;
#endif

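/*
 * Note: use_dma only takes effect when use_prefetch is also enabled, since
 * the DMA path is driven by the GPMC prefetch engine (omap_nand_init()
 * warns about the unsupported combination).
 */
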
struct omap_nand_info {
	struct nand_hw_control controller;
	struct omap_nand_platform_data *pdata;
	struct mtd_info mtd;
	struct mtd_partition *parts;
	struct nand_chip nand;
	struct platform_device *pdev;

	int gpmc_cs;
	unsigned long phys_base;
	struct completion comp;
	int dma_ch;
};

/**
 * omap_hwcontrol - hardware specific access to control-lines
 * @mtd: MTD device structure
 * @cmd: command to device
 * @ctrl:
 * NAND_NCE: bit 0 -> don't care
 * NAND_CLE: bit 1 -> Command Latch
 * NAND_ALE: bit 2 -> Address Latch
 *
 * NOTE: boards may use different bits for these!!
 */
static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);

		else if (ctrl & NAND_ALE)
			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);

		else /* NAND_NCE */
			gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
	}
}

/**
 * omap_read_buf8 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread8_rep(nand->IO_ADDR_R, buf, len);
}

/**
 * omap_write_buf8 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	u_char *p = (u_char *)buf;
	u32 status = 0;

	while (len--) {
		iowrite8(*p++, info->nand.IO_ADDR_W);
		/* wait until buffer is available for write */
		do {
			status = gpmc_read_status(GPMC_STATUS_BUFFER);
		} while (!status);
	}
}

/**
 * omap_read_buf16 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
}

/**
 * omap_write_buf16 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	u16 *p = (u16 *) buf;
	u32 status = 0;
	/* FIXME try bursts of writesw() or DMA ... */
	len >>= 1;

	while (len--) {
		iowrite16(*p++, info->nand.IO_ADDR_W);
		/* wait until buffer is available for write */
		do {
			status = gpmc_read_status(GPMC_STATUS_BUFFER);
		} while (!status);
	}
}

/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	uint32_t r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads */
	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len);
		else
			omap_read_buf8(mtd, buf, len);
	} else {
		p = (u32 *) buf;
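		/*
		 * Drain the prefetch FIFO: GPMC_PREFETCH_FIFO_CNT reports the
		 * number of bytes available, which is read out here as
		 * 32-bit words.
		 */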
		do {
			r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			r_count = r_count >> 2;
			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);
		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}

/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	uint32_t pref_count = 0, w_count = 0;
	int i = 0, ret = 0;
	u16 *p;

	/* take care of subpage writes */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, buf, len);
		else
			omap_write_buf8(mtd, buf, len);
	} else {
		p = (u16 *) buf;
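		/*
		 * Keep the write-posting FIFO topped up: GPMC_PREFETCH_FIFO_CNT
		 * reports the free space in bytes, which is filled here as
		 * 16-bit words.
		 */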
		while (len) {
			w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.IO_ADDR_W);
		}
		/* wait for the data to be flushed out before resetting the prefetch */
		do {
			pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT);
		} while (pref_count);
		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset(info->gpmc_cs);
	}
}

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
/*
 * omap_nand_dma_cb: callback on the completion of dma transfer
 * @lch: logical channel
 * @ch_status: channel status
 * @data: pointer to completion data structure
 */
static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
{
	complete((struct completion *) data);
}

/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	uint32_t prefetch_status = 0;
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	dma_addr_t dma_addr;
	int ret;

	/* The fifo depth is 64 bytes. We have a sync at each frame and frame
	 * length is 64 bytes.
	 */
	int buf_len = len >> 6;

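	/*
	 * Addresses above the kernel's direct mapping (e.g. vmalloc buffers)
	 * cannot be passed to dma_map_single() as-is: translate them to the
	 * backing page first, and fall back to a CPU copy if the buffer
	 * crosses a page boundary or has no backing page.
	 */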
	if (addr >= high_memory) {
		struct page *p1;

		if (((size_t)addr & PAGE_MASK) !=
			((size_t)(addr + len - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(addr);
		if (!p1)
			goto out_copy;
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	if (is_write) {
		omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
		omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
						dma_addr, 0, 0);
		omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
						0x10, buf_len, OMAP_DMA_SYNC_FRAME,
						OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
	} else {
		omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
		omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
						dma_addr, 0, 0);
		omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
						0x10, buf_len, OMAP_DMA_SYNC_FRAME,
						OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
	}
	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	init_completion(&info->comp);

	omap_start_dma(info->dma_ch);

	/* wait for the DMA transfer started above to complete */
	wait_for_completion(&info->comp);

	do {
		prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
	} while (prefetch_status);
	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);

	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
	return 0;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}
#else
static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	return 0;
}
#endif

/**
 * omap_read_buf_dma_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_read_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, buf, len, 0x0);
}

/**
 * omap_write_buf_dma_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_dma_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_write_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}

/**
 * omap_verify_buf - Verify chip data against buffer
 * @mtd: MTD device structure
 * @buf: buffer containing the data to compare
 * @len: number of bytes to compare
 */
static int omap_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	u16 *p = (u16 *) buf;

	len >>= 1;
	while (len--) {
		if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_MTD_NAND_OMAP_HWECC

/**
 * gen_true_ecc - This function will generate true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * This generated true ECC value can be used when correcting
 * data read from NAND flash memory core
 */
static void gen_true_ecc(u8 *ecc_buf)
{
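	/*
	 * Repack the three ECC bytes into the bit layout the P*() macros
	 * expect: even parity bits land in bits 0-11 and odd parity bits in
	 * bits 16-27 (presumably mirroring the hardware ECC result layout).
	 */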
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}

/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares the two ECCs and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 * If there is no error, %0 is returned. If there is an error but it
 * was corrected, %1 is returned. Otherwise, %-1 is returned.
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint i;
	u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8 ecc_bit[24];
	u8 ecc_sum = 0;
	u8 find_bit = 0;
	uint find_byte = 0;
	int isEccFF;

	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

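	/* unpack each ECC byte into its individual bits, LSB first */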
	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 * ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9] << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 1;
	default:
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}

/**
 * omap_correct_data - Compares the ECC read with HW generated ECC
 * @mtd: MTD device structure
 * @dat: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 *
 * Compares the ecc read from the nand spare area with the ECC register
 * values and, if the ECCs mismatch, calls 'omap_compare_ecc' for error
 * detection and correction. If there are no errors, %0 is returned. If
 * there were errors and all of the errors were corrected, the number of
 * corrected errors is returned. If uncorrectable errors exist, %-1 is
 * returned.
 */
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
				u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	int blockCnt = 0, i = 0, ret = 0;
	int stat = 0;

	/* Ex NAND_ECC_HW12_2048 */
	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
			(info->nand.ecc.size == 2048))
		blockCnt = 4;
	else
		blockCnt = 1;

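	/* the ECC is checked per 512-byte chunk, 3 ECC bytes per chunk */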
	for (i = 0; i < blockCnt; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
			if (ret < 0)
				return ret;
			/* keep track of the number of corrected errors */
			stat += ret;
		}
		read_ecc += 3;
		calc_ecc += 3;
		dat += 512;
	}
	return stat;
}

/**
 * omap_calculate_ecc - Generate non-inverted ECC bytes.
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 *
 * Using noninverted ECC can be considered ugly since writing a blank
 * page (i.e. padding) will clear the ECC bytes. This is no problem as long
 * as nobody is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 */
static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
				u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
}

/**
 * omap_enable_hwecc - This function enables the hardware ecc functionality
 * @mtd: MTD device structure
 * @mode: Read/Write mode
 */
static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	struct nand_chip *chip = mtd->priv;
	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;

	gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
}

#endif

/**
 * omap_wait - wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND Chip structure
 *
 * The wait function is called during program and erase operations.
 * Given the way it is called from the MTD layer, we should wait until
 * the NAND chip is ready after the programming/erase operation has
 * completed.
 *
 * Erase can take up to 400ms and program up to 20ms according to
 * general NAND and SmartMedia specs.
 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status = NAND_STATUS_FAIL, state = this->state;

	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	gpmc_nand_write(info->gpmc_cs,
			GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
	while (time_before(jiffies, timeo)) {
		status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}
	return status;
}

/**
 * omap_dev_ready - calls the platform specific dev_ready function
 * @mtd: MTD device structure
 */
static int omap_dev_ready(struct mtd_info *mtd)
{
	unsigned int val = 0;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

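	/*
	 * Bit 8 of the GPMC IRQ status appears to reflect the ready/busy
	 * wait-pin event; it is cleared below once it has been seen,
	 * otherwise it is polled a bounded number of times.
	 */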
	val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
	if ((val & 0x100) == 0x100) {
		/* Clear IRQ Interrupt */
		val |= 0x100;
		val &= ~(0x0);
		gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
	} else {
		unsigned int cnt = 0;
		while (cnt++ < 0x1FF) {
			if ((val & 0x100) == 0x100)
				return 0;
			val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
		}
	}

	return 1;
}

static int __devinit omap_nand_probe(struct platform_device *pdev)
{
	struct omap_nand_info *info;
	struct omap_nand_platform_data *pdata;
	int err;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	spin_lock_init(&info->controller.lock);
	init_waitqueue_head(&info->controller.wq);

	info->pdev = pdev;

	info->gpmc_cs = pdata->cs;
	info->phys_base = pdata->phys_base;

	info->mtd.priv = &info->nand;
	info->mtd.name = dev_name(&pdev->dev);
	info->mtd.owner = THIS_MODULE;

	info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
	info->nand.options |= NAND_SKIP_BBTSCAN;

	/* NAND write protect off */
	gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);

	if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
				pdev->dev.driver->name)) {
		err = -EBUSY;
		goto out_free_info;
	}

	info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
	if (!info->nand.IO_ADDR_R) {
		err = -ENOMEM;
		goto out_release_mem_region;
	}

	info->nand.controller = &info->controller;

	info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
	info->nand.cmd_ctrl = omap_hwcontrol;

	/*
	 * If the RDY/BSY line is connected to the OMAP, use the omap ready
	 * function and the generic nand_wait function, which reads the status
	 * register after monitoring the RDY/BSY line. Otherwise use a standard
	 * chip delay, which is slightly more than tR (AC Timing) of the NAND
	 * device, and read the status register until you get a failure or
	 * success.
	 */
	if (pdata->dev_ready) {
		info->nand.dev_ready = omap_dev_ready;
		info->nand.chip_delay = 0;
	} else {
		info->nand.waitfunc = omap_wait;
		info->nand.chip_delay = 50;
	}

	if (use_prefetch) {
		info->nand.read_buf = omap_read_buf_pref;
		info->nand.write_buf = omap_write_buf_pref;
		if (use_dma) {
			err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
				omap_nand_dma_cb, &info->comp, &info->dma_ch);
			if (err < 0) {
				info->dma_ch = -1;
				printk(KERN_WARNING "DMA request failed."
					" Non-dma data transfer mode\n");
			} else {
				omap_set_dma_dest_burst_mode(info->dma_ch,
						OMAP_DMA_DATA_BURST_16);
				omap_set_dma_src_burst_mode(info->dma_ch,
						OMAP_DMA_DATA_BURST_16);

				info->nand.read_buf = omap_read_buf_dma_pref;
				info->nand.write_buf = omap_write_buf_dma_pref;
			}
		}
	} else {
		if (info->nand.options & NAND_BUSWIDTH_16) {
			info->nand.read_buf = omap_read_buf16;
			info->nand.write_buf = omap_write_buf16;
		} else {
			info->nand.read_buf = omap_read_buf8;
			info->nand.write_buf = omap_write_buf8;
		}
	}
	info->nand.verify_buf = omap_verify_buf;

#ifdef CONFIG_MTD_NAND_OMAP_HWECC
	info->nand.ecc.bytes = 3;
	info->nand.ecc.size = 512;
	info->nand.ecc.calculate = omap_calculate_ecc;
	info->nand.ecc.hwctl = omap_enable_hwecc;
	info->nand.ecc.correct = omap_correct_data;
	info->nand.ecc.mode = NAND_ECC_HW;

#else
	info->nand.ecc.mode = NAND_ECC_SOFT;
#endif

	/* DIP switches on some boards change between 8 and 16 bit
	 * bus widths for flash. Try the other width if the first try fails.
	 */
	if (nand_scan(&info->mtd, 1)) {
		info->nand.options ^= NAND_BUSWIDTH_16;
		if (nand_scan(&info->mtd, 1)) {
			err = -ENXIO;
			goto out_release_mem_region;
		}
	}

#ifdef CONFIG_MTD_PARTITIONS
	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
	if (err > 0)
		add_mtd_partitions(&info->mtd, info->parts, err);
	else if (pdata->parts)
		add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
	else
#endif
		add_mtd_device(&info->mtd);

	platform_set_drvdata(pdev, &info->mtd);

	return 0;

out_release_mem_region:
	release_mem_region(info->phys_base, NAND_IO_SIZE);
out_free_info:
	kfree(info);

	return err;
}

static int omap_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	platform_set_drvdata(pdev, NULL);
	if (use_dma)
		omap_free_dma(info->dma_ch);

	/* Release NAND device, its internal structures and partitions */
	nand_release(&info->mtd);
	iounmap(info->nand.IO_ADDR_R);
	kfree(&info->mtd);
	return 0;
}

static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap_nand_init(void)
{
	printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);

	/* This check is required if the driver is being
	 * loaded at run time as a module.
	 */
	if ((1 == use_dma) && (0 == use_prefetch)) {
		printk(KERN_INFO "Wrong parameters: 'use_dma' cannot be 1 "
				"without 'use_prefetch'. Prefetch will not be "
				"used in either mode (mpu or dma)\n");
	}
	return platform_driver_register(&omap_nand_driver);
}

static void __exit omap_nand_exit(void)
{
	platform_driver_unregister(&omap_nand_driver);
}

module_init(omap_nand_init);
module_exit(omap_nand_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");