blob: 52fc089d5bcb01cdb4041af5a45234a6e26c24f4 [file] [log] [blame]
/*
 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
 * Copyright © 2004 Micron Technology Inc.
 * Copyright © 2004 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10
11#include <linux/platform_device.h>
Russell King763e7352012-04-25 00:16:00 +010012#include <linux/dmaengine.h>
Vimal Singh67ce04b2009-05-12 13:47:03 -070013#include <linux/dma-mapping.h>
14#include <linux/delay.h>
Paul Gortmakera0e5cc52011-07-03 15:17:31 -040015#include <linux/module.h>
Sukumar Ghorai4e070372011-01-28 15:42:06 +053016#include <linux/interrupt.h>
vimal singhc276aca2009-06-27 11:07:06 +053017#include <linux/jiffies.h>
18#include <linux/sched.h>
Vimal Singh67ce04b2009-05-12 13:47:03 -070019#include <linux/mtd/mtd.h>
20#include <linux/mtd/nand.h>
21#include <linux/mtd/partitions.h>
Russell King763e7352012-04-25 00:16:00 +010022#include <linux/omap-dma.h>
Vimal Singh67ce04b2009-05-12 13:47:03 -070023#include <linux/io.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090024#include <linux/slab.h>
Vimal Singh67ce04b2009-05-12 13:47:03 -070025
Ivan Djelic0e618ef2012-04-30 12:17:18 +020026#ifdef CONFIG_MTD_NAND_OMAP_BCH
27#include <linux/bch.h>
28#endif
29
Tony Lindgrence491cf2009-10-20 09:40:47 -070030#include <plat/dma.h>
31#include <plat/gpmc.h>
32#include <plat/nand.h>
Vimal Singh67ce04b2009-05-12 13:47:03 -070033
#define	DRIVER_NAME	"omap2-nand"
/* upper bound (ms) on busy-wait loops that drain the prefetch engine */
#define	OMAP_NAND_TIMEOUT_MS	5000

/*
 * Bit flags naming the individual parity bits of the 3-byte hardware ECC
 * syndrome: Pxe = parity over the "even" half at granularity x bits,
 * Pxo = parity over the "odd" half (Hamming-style row/column parity).
 */
#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)

/* truth flag: collapse any non-zero value to 1 so it can be shifted */
#define TF(value)	(value ? 1 : 0)

/*
 * The P*() helper families below extract one parity flag from a packed
 * 32-bit syndrome word and place it at a fixed bit position, so that
 * gen_true_ecc() can assemble the three ECC bytes by OR-ing them together.
 */
#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)

#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)

#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)

/* "_s" variants: same flags placed at the alternate (swapped) positions */
#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)

/* GPMC prefetch/ECC register field positions and constants */
#define	PREFETCH_CONFIG1_CS_SHIFT	24
#define	ECC_CONFIG_CS_SHIFT		1
#define	CS_MASK				0x7
#define	ENABLE_PREFETCH			(0x1 << 7)
#define	DMA_MPU_MODE_SHIFT		2
#define	ECCSIZE1_SHIFT			22
#define	ECC1RESULTSIZE			0x1
#define	ECCCLEAR			0x100
#define	ECC1				0x1
113
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
/* Define some generic bad / good block scan pattern which are used
 * while scanning a device for factory marked good / bad blocks
 */
static uint8_t scan_ff_pattern[] = { 0xff };	/* erased/good marker byte */
static struct nand_bbt_descr bb_descrip_flashbased = {
	.options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
	.offs = 0,		/* marker byte sits at OOB offset 0 */
	.len = 1,		/* compare a single byte against the pattern */
	.pattern = scan_ff_pattern,
};
vimal singh59e9c5a2009-07-13 16:26:24 +0530126
vimal singh59e9c5a2009-07-13 16:26:24 +0530127
/* Per-device driver state; embeds the MTD and NAND-core structures. */
struct omap_nand_info {
	struct nand_hw_control		controller;
	struct omap_nand_platform_data	*pdata;	/* board platform data */
	struct mtd_info			mtd;
	struct nand_chip		nand;
	struct platform_device		*pdev;

	int				gpmc_cs;	/* GPMC chip-select number */
	unsigned long			phys_base;
	struct completion		comp;	/* signals DMA/irq transfer done */
	struct dma_chan			*dma;	/* dmaengine channel, if used */
	int				gpmc_irq;
	/* direction of the transfer currently serviced by omap_nand_irq() */
	enum {
		OMAP_NAND_IO_READ = 0,	/* read */
		OMAP_NAND_IO_WRITE,	/* write */
	} iomode;
	u_char				*buf;	/* irq-mode cursor into caller's buffer */
	int				buf_len;	/* irq-mode bytes remaining */
	struct gpmc_nand_regs		reg;	/* mapped GPMC NAND register addresses */

#ifdef CONFIG_MTD_NAND_OMAP_BCH
	struct bch_control		*bch;	/* software BCH decoder state */
	struct nand_ecclayout		ecclayout;
#endif
};
153
154/**
Afzal Mohammed65b97cf2012-08-30 12:53:22 -0700155 * omap_prefetch_enable - configures and starts prefetch transfer
156 * @cs: cs (chip select) number
157 * @fifo_th: fifo threshold to be used for read/ write
158 * @dma_mode: dma mode enable (1) or disable (0)
159 * @u32_count: number of bytes to be transferred
160 * @is_write: prefetch read(0) or write post(1) mode
161 */
162static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
163 unsigned int u32_count, int is_write, struct omap_nand_info *info)
164{
165 u32 val;
166
167 if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
168 return -1;
169
170 if (readl(info->reg.gpmc_prefetch_control))
171 return -EBUSY;
172
173 /* Set the amount of bytes to be prefetched */
174 writel(u32_count, info->reg.gpmc_prefetch_config2);
175
176 /* Set dma/mpu mode, the prefetch read / post write and
177 * enable the engine. Set which cs is has requested for.
178 */
179 val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
180 PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
181 (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
182 writel(val, info->reg.gpmc_prefetch_config1);
183
184 /* Start the prefetch engine */
185 writel(0x1, info->reg.gpmc_prefetch_control);
186
187 return 0;
188}
189
190/**
191 * omap_prefetch_reset - disables and stops the prefetch engine
192 */
193static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
194{
195 u32 config1;
196
197 /* check if the same module/cs is trying to reset */
198 config1 = readl(info->reg.gpmc_prefetch_config1);
199 if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
200 return -EINVAL;
201
202 /* Stop the PFPW engine */
203 writel(0x0, info->reg.gpmc_prefetch_control);
204
205 /* Reset/disable the PFPW engine */
206 writel(0x0, info->reg.gpmc_prefetch_config1);
207
208 return 0;
209}
210
211/**
Vimal Singh67ce04b2009-05-12 13:47:03 -0700212 * omap_hwcontrol - hardware specific access to control-lines
213 * @mtd: MTD device structure
214 * @cmd: command to device
215 * @ctrl:
216 * NAND_NCE: bit 0 -> don't care
217 * NAND_CLE: bit 1 -> Command Latch
218 * NAND_ALE: bit 2 -> Address Latch
219 *
220 * NOTE: boards may use different bits for these!!
221 */
222static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
223{
224 struct omap_nand_info *info = container_of(mtd,
225 struct omap_nand_info, mtd);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700226
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000227 if (cmd != NAND_CMD_NONE) {
228 if (ctrl & NAND_CLE)
Afzal Mohammed65b97cf2012-08-30 12:53:22 -0700229 writeb(cmd, info->reg.gpmc_nand_command);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700230
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000231 else if (ctrl & NAND_ALE)
Afzal Mohammed65b97cf2012-08-30 12:53:22 -0700232 writeb(cmd, info->reg.gpmc_nand_address);
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000233
234 else /* NAND_NCE */
Afzal Mohammed65b97cf2012-08-30 12:53:22 -0700235 writeb(cmd, info->reg.gpmc_nand_data);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700236 }
Vimal Singh67ce04b2009-05-12 13:47:03 -0700237}
238
239/**
vimal singh59e9c5a2009-07-13 16:26:24 +0530240 * omap_read_buf8 - read data from NAND controller into buffer
241 * @mtd: MTD device structure
242 * @buf: buffer to store date
243 * @len: number of bytes to read
244 */
245static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
246{
247 struct nand_chip *nand = mtd->priv;
248
249 ioread8_rep(nand->IO_ADDR_R, buf, len);
250}
251
252/**
253 * omap_write_buf8 - write buffer to NAND controller
254 * @mtd: MTD device structure
255 * @buf: data buffer
256 * @len: number of bytes to write
257 */
258static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
259{
260 struct omap_nand_info *info = container_of(mtd,
261 struct omap_nand_info, mtd);
262 u_char *p = (u_char *)buf;
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000263 u32 status = 0;
vimal singh59e9c5a2009-07-13 16:26:24 +0530264
265 while (len--) {
266 iowrite8(*p++, info->nand.IO_ADDR_W);
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000267 /* wait until buffer is available for write */
268 do {
Afzal Mohammed65b97cf2012-08-30 12:53:22 -0700269 status = readl(info->reg.gpmc_status) &
270 GPMC_STATUS_BUFF_EMPTY;
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000271 } while (!status);
vimal singh59e9c5a2009-07-13 16:26:24 +0530272 }
273}
274
275/**
Vimal Singh67ce04b2009-05-12 13:47:03 -0700276 * omap_read_buf16 - read data from NAND controller into buffer
277 * @mtd: MTD device structure
278 * @buf: buffer to store date
279 * @len: number of bytes to read
280 */
281static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
282{
283 struct nand_chip *nand = mtd->priv;
284
vimal singh59e9c5a2009-07-13 16:26:24 +0530285 ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700286}
287
288/**
289 * omap_write_buf16 - write buffer to NAND controller
290 * @mtd: MTD device structure
291 * @buf: data buffer
292 * @len: number of bytes to write
293 */
294static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
295{
296 struct omap_nand_info *info = container_of(mtd,
297 struct omap_nand_info, mtd);
298 u16 *p = (u16 *) buf;
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000299 u32 status = 0;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700300 /* FIXME try bursts of writesw() or DMA ... */
301 len >>= 1;
302
303 while (len--) {
vimal singh59e9c5a2009-07-13 16:26:24 +0530304 iowrite16(*p++, info->nand.IO_ADDR_W);
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000305 /* wait until buffer is available for write */
306 do {
Afzal Mohammed65b97cf2012-08-30 12:53:22 -0700307 status = readl(info->reg.gpmc_status) &
308 GPMC_STATUS_BUFF_EMPTY;
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000309 } while (!status);
Vimal Singh67ce04b2009-05-12 13:47:03 -0700310 }
311}
vimal singh59e9c5a2009-07-13 16:26:24 +0530312
/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store date
 * @len: number of bytes to read
 *
 * Uses the GPMC prefetch engine in MPU mode; falls back to plain CPU
 * copy when the engine is busy. The FIFO is drained 32 bits at a time.
 */
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads: copy the unaligned head (len % 4
	 * bytes) via the plain 8/16-bit path so the rest is 32-bit aligned */
	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, (u_char *)p, len);
		else
			omap_read_buf8(mtd, (u_char *)p, len);
	} else {
		/* drain however many bytes the FIFO has accumulated,
		 * in whole 32-bit words, until the request is satisfied */
		do {
			r_count = readl(info->reg.gpmc_prefetch_status);
			r_count = GPMC_PREFETCH_STATUS_FIFO_CNT(r_count);
			r_count = r_count >> 2;
			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);
		/* disable and stop the PFPW engine */
		omap_prefetch_reset(info->gpmc_cs, info);
	}
}
359
/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Uses the GPMC write-posting engine in MPU mode; falls back to plain
 * CPU copy when the engine is busy.
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t w_count = 0;
	int i = 0, ret = 0;
	u16 *p = (u16 *)buf;
	unsigned long tim, limit;
	u32 val;

	/* take care of subpage writes: emit the odd leading byte so the
	 * remainder can go out as 16-bit words */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, (u_char *)p, len);
		else
			omap_write_buf8(mtd, (u_char *)p, len);
	} else {
		/* feed the FIFO only as many 16-bit words as it has room for */
		while (len) {
			w_count = readl(info->reg.gpmc_prefetch_status);
			w_count = GPMC_PREFETCH_STATUS_FIFO_CNT(w_count);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.IO_ADDR_W);
		}
		/* wait for data to flushed-out before reset the prefetch;
		 * bounded by OMAP_NAND_TIMEOUT_MS worth of loop iterations */
		tim = 0;
		limit = (loops_per_jiffy *
					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
		do {
			cpu_relax();
			val = readl(info->reg.gpmc_prefetch_status);
			val = GPMC_PREFETCH_STATUS_COUNT(val);
		} while (val && (tim++ < limit));

		/* disable and stop the PFPW engine */
		omap_prefetch_reset(info->gpmc_cs, info);
	}
}
415
/*
 * omap_nand_dma_callback: callback on the completion of dma transfer
 * @data: pointer to completion data structure
 */
static void omap_nand_dma_callback(void *data)
{
	struct completion *comp = data;

	complete(comp);
}
vimal singhdfe32892009-07-13 16:29:16 +0530424
/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 *
 * Pairs a dmaengine slave transfer with the GPMC prefetch engine in DMA
 * mode. Any setup failure falls back to the CPU copy path; always
 * returns 0 because the fallback cannot fail.
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	struct dma_async_tx_descriptor *tx;
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	struct scatterlist sg;
	unsigned long tim, limit;
	unsigned n;
	int ret;
	u32 val;

	/* vmalloc'ed buffers are not physically contiguous: only DMA them
	 * if the whole request sits within one page, else CPU-copy */
	if (addr >= high_memory) {
		struct page *p1;

		if (((size_t)addr & PAGE_MASK) !=
			((size_t)(addr + len - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(addr);
		if (!p1)
			goto out_copy;
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	sg_init_one(&sg, addr, len);
	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
	if (n == 0) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
		is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		goto out_copy_unmap;

	/* completion is signalled from omap_nand_dma_callback() */
	tx->callback = omap_nand_dma_callback;
	tx->callback_param = &info->comp;
	dmaengine_submit(tx);

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy_unmap;

	init_completion(&info->comp);
	dma_async_issue_pending(info->dma);

	/* setup and start DMA using dma_addr */
	wait_for_completion(&info->comp);
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));

	/* bounded busy-wait until the prefetch engine reports 0 bytes left */
	do {
		cpu_relax();
		val = readl(info->reg.gpmc_prefetch_status);
		val = GPMC_PREFETCH_STATUS_COUNT(val);
	} while (val && (tim++ < limit));

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);

	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
	return 0;

out_copy_unmap:
	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
	/* fallback: plain CPU copy in the right width and direction */
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}
vimal singhdfe32892009-07-13 16:29:16 +0530514
515/**
516 * omap_read_buf_dma_pref - read data from NAND controller into buffer
517 * @mtd: MTD device structure
518 * @buf: buffer to store date
519 * @len: number of bytes to read
520 */
521static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
522{
523 if (len <= mtd->oobsize)
524 omap_read_buf_pref(mtd, buf, len);
525 else
526 /* start transfer in DMA mode */
527 omap_nand_dma_transfer(mtd, buf, len, 0x0);
528}
529
530/**
531 * omap_write_buf_dma_pref - write buffer to NAND controller
532 * @mtd: MTD device structure
533 * @buf: data buffer
534 * @len: number of bytes to write
535 */
536static void omap_write_buf_dma_pref(struct mtd_info *mtd,
537 const u_char *buf, int len)
538{
539 if (len <= mtd->oobsize)
540 omap_write_buf_pref(mtd, buf, len);
541 else
542 /* start transfer in DMA mode */
Vimal Singhbdaefc42010-01-05 12:49:24 +0530543 omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
vimal singhdfe32892009-07-13 16:29:16 +0530544}
545
/*
 * omap_nand_irq - GMPC irq handler
 * @this_irq: gpmc irq number
 * @dev: omap_nand_info structure pointer is passed here
 *
 * Services FIFO-threshold and terminal-count interrupts for irq-mode
 * prefetch transfers, moving data between info->buf and the NAND data
 * register and completing info->comp when the transfer is done.
 */
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
	struct omap_nand_info *info = (struct omap_nand_info *) dev;
	u32 bytes;
	u32 irq_stat;

	irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
	bytes = readl(info->reg.gpmc_prefetch_status);
	bytes = GPMC_PREFETCH_STATUS_FIFO_CNT(bytes);
	bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
		/* bit 0x2 appears to be the count-event (transfer-complete)
		 * status bit — NOTE(review): confirm against GPMC TRM */
		if (irq_stat & 0x2)
			goto done;

		/* never push more than the caller still has queued */
		if (info->buf_len && (info->buf_len < bytes))
			bytes = info->buf_len;
		else if (!info->buf_len)
			bytes = 0;
		iowrite32_rep(info->nand.IO_ADDR_W,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;
		info->buf_len -= bytes;

	} else {
		/* read side: drain the FIFO first, then check completion */
		ioread32_rep(info->nand.IO_ADDR_R,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;

		if (irq_stat & 0x2)
			goto done;
	}
	/* acknowledge the serviced interrupt and keep going */
	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);

	return IRQ_HANDLED;

done:
	complete(&info->comp);
	/* disable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);

	/* clear status */
	gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);

	return IRQ_HANDLED;
}
596
/*
 * omap_read_buf_irq_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store date
 * @len: number of bytes to read
 *
 * Irq-driven variant: arms the prefetch engine and lets omap_nand_irq()
 * drain the FIFO, sleeping on info->comp until the transfer finishes.
 */
static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;

	/* small (OOB-sized) reads are cheaper via the polled path */
	if (len <= mtd->oobsize) {
		omap_read_buf_pref(mtd, buf, len);
		return;
	}

	/* state consumed by omap_nand_irq() */
	info->iomode = OMAP_NAND_IO_READ;
	info->buf = buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
		(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for read to complete */
	wait_for_completion(&info->comp);

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_read_buf16(mtd, buf, len);
	else
		omap_read_buf8(mtd, buf, len);
}
643
/*
 * omap_write_buf_irq_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Irq-driven variant: arms the write-posting engine and lets
 * omap_nand_irq() refill the FIFO, sleeping on info->comp until done.
 */
static void omap_write_buf_irq_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;
	unsigned long tim, limit;
	u32 val;

	/* small (OOB-sized) writes are cheaper via the polled path */
	if (len <= mtd->oobsize) {
		omap_write_buf_pref(mtd, buf, len);
		return;
	}

	/* state consumed by omap_nand_irq() */
	info->iomode = OMAP_NAND_IO_WRITE;
	info->buf = (u_char *) buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer : size=24 */
	ret = omap_prefetch_enable(info->gpmc_cs,
		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;
	/* enable irq */
	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
			(GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));

	/* waiting for write to complete */
	wait_for_completion(&info->comp);
	/* wait for data to flushed-out before reset the prefetch;
	 * bounded by OMAP_NAND_TIMEOUT_MS worth of loop iterations */
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	do {
		val = readl(info->reg.gpmc_prefetch_status);
		val = GPMC_PREFETCH_STATUS_COUNT(val);
		cpu_relax();
	} while (val && (tim++ < limit));

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_write_buf16(mtd, buf, len);
	else
		omap_write_buf8(mtd, buf, len);
}
701
Vimal Singh67ce04b2009-05-12 13:47:03 -0700702/**
703 * omap_verify_buf - Verify chip data against buffer
704 * @mtd: MTD device structure
705 * @buf: buffer containing the data to compare
706 * @len: number of bytes to compare
707 */
708static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
709{
710 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
711 mtd);
712 u16 *p = (u16 *) buf;
713
714 len >>= 1;
715 while (len--) {
716 if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
717 return -EFAULT;
718 }
719
720 return 0;
721}
722
/**
 * gen_true_ecc - This function will generate true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * This generated true ECC value can be used when correcting
 * data read from NAND flash memory core
 */
static void gen_true_ecc(u8 *ecc_buf)
{
	/* pack the three ECC bytes into one 32-bit word so the P*() parity
	 * extraction macros can address every bit by its flag position */
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	/* rebuild each byte from the inverted parity bits, placed at the
	 * positions the P*() macro families define */
	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}
742
/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1:	ecc code from nand spare area
 * @ecc_data2:	ecc code from hardware register obtained from hardware ecc
 * @page_data:	page data
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected to the buffer.
 * If there is no error, %0 is returned. If there is an error but it
 * was corrected, %1 is returned. Otherwise, %-1 is returned.
 *
 * NOTE: both ECC buffers are destructively normalised (gen_true_ecc +
 * inversion + bit-extraction) in the process.
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint	i;
	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8	ecc_bit[24];
	u8	ecc_sum = 0;
	u8	find_bit = 0;
	uint	find_byte = 0;
	int	isEccFF;

	/* an all-0xFF stored ECC marks an erased (never-programmed) page */
	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	/* explode each ECC byte into its individual bits (LSB first),
	 * consuming the byte by repeated divide-by-two */
	for (i = 0; i < 8; i++) {
		tmp0_bit[i]	 = *ecc_data1 % 2;
		*ecc_data1	 = *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i]	 = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i]	 = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i]	 = *ecc_data2 % 2;
		*ecc_data2	 = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i]	 = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i]	 = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	/* build the 24-bit XOR syndrome from the two bit arrays */
	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	/* population count of the syndrome classifies the error */
	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 *  ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		pr_debug("ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		pr_debug("ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error: the odd syndrome bits encode the
		 * failing byte offset, the low ones the failing bit */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		pr_debug("Correcting single bit ECC error at offset: "
				"%d, bit: %d\n", find_byte, find_bit);

		/* flip the faulty bit in place */
		page_data[find_byte] ^= (1 << find_bit);

		return 1;
	default:
		if (isEccFF) {
			/* erased page with a clean hardware ECC: no error */
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		pr_debug("UNCORRECTED_ERROR default\n");
		return -1;
	}
}
870
871/**
872 * omap_correct_data - Compares the ECC read with HW generated ECC
873 * @mtd: MTD device structure
874 * @dat: page data
875 * @read_ecc: ecc read from nand flash
876 * @calc_ecc: ecc read from HW ECC registers
877 *
878 * Compares the ecc read from nand spare area with ECC registers values
John Ogness74f1b722011-02-28 13:12:46 +0100879 * and if ECC's mismatched, it will call 'omap_compare_ecc' for error
880 * detection and correction. If there are no errors, %0 is returned. If
881 * there were errors and all of the errors were corrected, the number of
882 * corrected errors is returned. If uncorrectable errors exist, %-1 is
883 * returned.
Vimal Singh67ce04b2009-05-12 13:47:03 -0700884 */
885static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
886 u_char *read_ecc, u_char *calc_ecc)
887{
888 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
889 mtd);
890 int blockCnt = 0, i = 0, ret = 0;
John Ogness74f1b722011-02-28 13:12:46 +0100891 int stat = 0;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700892
893 /* Ex NAND_ECC_HW12_2048 */
894 if ((info->nand.ecc.mode == NAND_ECC_HW) &&
895 (info->nand.ecc.size == 2048))
896 blockCnt = 4;
897 else
898 blockCnt = 1;
899
900 for (i = 0; i < blockCnt; i++) {
901 if (memcmp(read_ecc, calc_ecc, 3) != 0) {
902 ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
903 if (ret < 0)
904 return ret;
John Ogness74f1b722011-02-28 13:12:46 +0100905 /* keep track of the number of corrected errors */
906 stat += ret;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700907 }
908 read_ecc += 3;
909 calc_ecc += 3;
910 dat += 512;
911 }
John Ogness74f1b722011-02-28 13:12:46 +0100912 return stat;
Vimal Singh67ce04b2009-05-12 13:47:03 -0700913}
914
/**
 * omap_calculate_ecc - Generate non-inverted ECC bytes.
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 *
 * Using noninverted ECC can be considered ugly since writing a blank
 * page ie. padding will clear the ECC bytes. This is no problem as long
 * nobody is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 */
static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
				u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	u32 val;

	/*
	 * Bail out unless the ECC engine is currently configured for this
	 * driver's chip-select.
	 * NOTE(review): the mask used is '~CS_MASK'; verify against the
	 * ECC_CONFIG_CS_SHIFT/CS_MASK definitions that this extracts the
	 * CS field rather than its complement.
	 */
	val = readl(info->reg.gpmc_ecc_config);
	if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
		return -EINVAL;

	/* read ecc result */
	val = readl(info->reg.gpmc_ecc1_result);
	*ecc_code++ = val;          /* P128e, ..., P1e */
	*ecc_code++ = val >> 16;    /* P128o, ..., P1o */
	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);

	return 0;
}
947
/**
 * omap_enable_hwecc - This function enables the hardware ecc functionality
 * @mtd: MTD device structure
 * @mode: Read/Write mode
 *
 * Programs the GPMC ECC engine (size, mode and chip-select) so that a
 * subsequent page read/write accumulates a Hamming ECC that
 * omap_calculate_ecc() can fetch from the result register.
 */
static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	struct nand_chip *chip = mtd->priv;
	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
	u32 val;

	/* clear ecc and enable bits */
	val = ECCCLEAR | ECC1;
	writel(val, info->reg.gpmc_ecc_control);

	/* program ecc and result sizes */
	val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
			ECC1RESULTSIZE);
	writel(val, info->reg.gpmc_ecc_size_config);

	switch (mode) {
	case NAND_ECC_READ:
	case NAND_ECC_WRITE:
		/* clear result registers and point engine at result 1 */
		writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
		break;
	case NAND_ECC_READSYN:
		/* clear only; the result is about to be read back */
		writel(ECCCLEAR, info->reg.gpmc_ecc_control);
		break;
	default:
		dev_info(&info->pdev->dev,
			"error: unrecognized Mode[%d]!\n", mode);
		break;
	}

	/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
	val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
	writel(val, info->reg.gpmc_ecc_config);
}
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +0000988
Vimal Singh67ce04b2009-05-12 13:47:03 -0700989/**
990 * omap_wait - wait until the command is done
991 * @mtd: MTD device structure
992 * @chip: NAND Chip structure
993 *
994 * Wait function is called during Program and erase operations and
995 * the way it is called from MTD layer, we should wait till the NAND
996 * chip is ready after the programming/erase operation has completed.
997 *
998 * Erase can take up to 400ms and program up to 20ms according to
999 * general NAND and SmartMedia specs
1000 */
1001static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
1002{
1003 struct nand_chip *this = mtd->priv;
1004 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1005 mtd);
1006 unsigned long timeo = jiffies;
Ivan Djelica9c465f2012-04-17 13:11:53 +02001007 int status, state = this->state;
Vimal Singh67ce04b2009-05-12 13:47:03 -07001008
1009 if (state == FL_ERASING)
1010 timeo += (HZ * 400) / 1000;
1011 else
1012 timeo += (HZ * 20) / 1000;
1013
Afzal Mohammed65b97cf2012-08-30 12:53:22 -07001014 writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001015 while (time_before(jiffies, timeo)) {
Afzal Mohammed65b97cf2012-08-30 12:53:22 -07001016 status = readb(info->reg.gpmc_nand_data);
vimal singhc276aca2009-06-27 11:07:06 +05301017 if (status & NAND_STATUS_READY)
Vimal Singh67ce04b2009-05-12 13:47:03 -07001018 break;
vimal singhc276aca2009-06-27 11:07:06 +05301019 cond_resched();
Vimal Singh67ce04b2009-05-12 13:47:03 -07001020 }
Ivan Djelica9c465f2012-04-17 13:11:53 +02001021
1022 status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001023 return status;
1024}
1025
1026/**
1027 * omap_dev_ready - calls the platform specific dev_ready function
1028 * @mtd: MTD device structure
1029 */
1030static int omap_dev_ready(struct mtd_info *mtd)
1031{
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +00001032 unsigned int val = 0;
Vimal Singh67ce04b2009-05-12 13:47:03 -07001033 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1034 mtd);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001035
Afzal Mohammed65b97cf2012-08-30 12:53:22 -07001036 val = readl(info->reg.gpmc_status);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001037
Afzal Mohammed65b97cf2012-08-30 12:53:22 -07001038 if ((val & 0x100) == 0x100) {
1039 return 1;
1040 } else {
1041 return 0;
1042 }
Vimal Singh67ce04b2009-05-12 13:47:03 -07001043}
1044
Ivan Djelic0e618ef2012-04-30 12:17:18 +02001045#ifdef CONFIG_MTD_NAND_OMAP_BCH
1046
1047/**
1048 * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
1049 * @mtd: MTD device structure
1050 * @mode: Read/Write mode
1051 */
1052static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
1053{
1054 int nerrors;
1055 unsigned int dev_width;
1056 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1057 mtd);
1058 struct nand_chip *chip = mtd->priv;
1059
1060 nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
1061 dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
1062 /*
1063 * Program GPMC to perform correction on one 512-byte sector at a time.
1064 * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
1065 * gives a slight (5%) performance gain (but requires additional code).
1066 */
1067 (void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors);
1068}
1069
1070/**
1071 * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
1072 * @mtd: MTD device structure
1073 * @dat: The pointer to data on which ecc is computed
1074 * @ecc_code: The ecc_code buffer
1075 */
1076static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
1077 u_char *ecc_code)
1078{
1079 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1080 mtd);
1081 return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code);
1082}
1083
1084/**
1085 * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes
1086 * @mtd: MTD device structure
1087 * @dat: The pointer to data on which ecc is computed
1088 * @ecc_code: The ecc_code buffer
1089 */
1090static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
1091 u_char *ecc_code)
1092{
1093 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1094 mtd);
1095 return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code);
1096}
1097
1098/**
1099 * omap3_correct_data_bch - Decode received data and correct errors
1100 * @mtd: MTD device structure
1101 * @data: page data
1102 * @read_ecc: ecc read from nand flash
1103 * @calc_ecc: ecc read from HW ECC registers
1104 */
1105static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
1106 u_char *read_ecc, u_char *calc_ecc)
1107{
1108 int i, count;
1109 /* cannot correct more than 8 errors */
1110 unsigned int errloc[8];
1111 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1112 mtd);
1113
1114 count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
1115 errloc);
1116 if (count > 0) {
1117 /* correct errors */
1118 for (i = 0; i < count; i++) {
1119 /* correct data only, not ecc bytes */
1120 if (errloc[i] < 8*512)
1121 data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
1122 pr_debug("corrected bitflip %u\n", errloc[i]);
1123 }
1124 } else if (count < 0) {
1125 pr_err("ecc unrecoverable error\n");
1126 }
1127 return count;
1128}
1129
1130/**
1131 * omap3_free_bch - Release BCH ecc resources
1132 * @mtd: MTD device structure
1133 */
1134static void omap3_free_bch(struct mtd_info *mtd)
1135{
1136 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1137 mtd);
1138 if (info->bch) {
1139 free_bch(info->bch);
1140 info->bch = NULL;
1141 }
1142}
1143
/**
 * omap3_init_bch - Initialize BCH ECC
 * @mtd: MTD device structure
 * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
 *
 * Configures the GPMC BCH engine and the software decoder, then fills
 * in the nand_chip ecc callbacks.  Returns 0 on success, -1 on failure
 * (callers translate this into -EINVAL).
 */
static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
{
	int ret, max_errors;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
#ifdef CONFIG_MTD_NAND_OMAP_BCH8
	const int hw_errors = 8;
#else
	const int hw_errors = 4;
#endif
	info->bch = NULL;

	/* the requested strength must match the compiled-in Kconfig choice */
	max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
	if (max_errors != hw_errors) {
		pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
			max_errors, hw_errors);
		goto fail;
	}

	/* initialize GPMC BCH engine */
	ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors);
	if (ret)
		goto fail;

	/* software bch library is only used to detect and locate errors */
	info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
	if (!info->bch)
		goto fail;

	info->nand.ecc.size    = 512;
	info->nand.ecc.hwctl   = omap3_enable_hwecc_bch;
	info->nand.ecc.correct = omap3_correct_data_bch;
	info->nand.ecc.mode    = NAND_ECC_HW;

	/*
	 * The number of corrected errors in an ecc block that will trigger
	 * block scrubbing defaults to the ecc strength (4 or 8).
	 * Set mtd->bitflip_threshold here to define a custom threshold.
	 */

	if (max_errors == 8) {
		info->nand.ecc.strength  = 8;
		info->nand.ecc.bytes     = 13;
		info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
	} else {
		info->nand.ecc.strength  = 4;
		info->nand.ecc.bytes     = 7;
		info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
	}

	pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
	return 0;
fail:
	omap3_free_bch(mtd);
	return -1;
}
1205
/**
 * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
 * @mtd: MTD device structure
 *
 * Runs after nand_scan_ident() (when mtd->writesize/oobsize are known)
 * and lays the ECC bytes out at the tail of the OOB area, leaving the
 * two bad-block-marker bytes free.  Returns 0 on success, -1 on failure
 * (BCH resources are released on the failure path).
 */
static int omap3_init_bch_tail(struct mtd_info *mtd)
{
	int i, steps;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	struct nand_ecclayout *layout = &info->ecclayout;

	/* build oob layout */
	steps = mtd->writesize/info->nand.ecc.size;
	layout->eccbytes = steps*info->nand.ecc.bytes;

	/* do not bother creating special oob layouts for small page devices */
	if (mtd->oobsize < 64) {
		pr_err("BCH ecc is not supported on small page devices\n");
		goto fail;
	}

	/* reserve 2 bytes for bad block marker */
	if (layout->eccbytes+2 > mtd->oobsize) {
		pr_err("no oob layout available for oobsize %d eccbytes %u\n",
			mtd->oobsize, layout->eccbytes);
		goto fail;
	}

	/* put ecc bytes at oob tail */
	for (i = 0; i < layout->eccbytes; i++)
		layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;

	/* everything between the marker and the ECC bytes stays free */
	layout->oobfree[0].offset = 2;
	layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
	info->nand.ecc.layout = layout;

	/* 8-bit devices locate the factory bad-block marker differently */
	if (!(info->nand.options & NAND_BUSWIDTH_16))
		info->nand.badblock_pattern = &bb_descrip_flashbased;
	return 0;
fail:
	omap3_free_bch(mtd);
	return -1;
}
1249
1250#else
/* Stubs used when CONFIG_MTD_NAND_OMAP_BCH is disabled: selecting a BCH
 * ECC option without the Kconfig support always fails the probe. */
static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
{
	pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
	return -1;
}
/* always fails; never reached because omap3_init_bch() failed first */
static int omap3_init_bch_tail(struct mtd_info *mtd)
{
	return -1;
}
/* nothing to release when BCH support is compiled out */
static void omap3_free_bch(struct mtd_info *mtd)
{
}
1263#endif /* CONFIG_MTD_NAND_OMAP_BCH */
1264
Vimal Singh67ce04b2009-05-12 13:47:03 -07001265static int __devinit omap_nand_probe(struct platform_device *pdev)
1266{
1267 struct omap_nand_info *info;
1268 struct omap_nand_platform_data *pdata;
1269 int err;
Sukumar Ghoraif040d332011-01-28 15:42:09 +05301270 int i, offset;
Russell King763e7352012-04-25 00:16:00 +01001271 dma_cap_mask_t mask;
1272 unsigned sig;
Vimal Singh67ce04b2009-05-12 13:47:03 -07001273
1274 pdata = pdev->dev.platform_data;
1275 if (pdata == NULL) {
1276 dev_err(&pdev->dev, "platform data missing\n");
1277 return -ENODEV;
1278 }
1279
1280 info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
1281 if (!info)
1282 return -ENOMEM;
1283
1284 platform_set_drvdata(pdev, info);
1285
1286 spin_lock_init(&info->controller.lock);
1287 init_waitqueue_head(&info->controller.wq);
1288
1289 info->pdev = pdev;
1290
1291 info->gpmc_cs = pdata->cs;
Vimal Singh2f70a1e2010-02-15 10:03:33 -08001292 info->phys_base = pdata->phys_base;
Afzal Mohammed65b97cf2012-08-30 12:53:22 -07001293 info->reg = pdata->reg;
Vimal Singh67ce04b2009-05-12 13:47:03 -07001294
1295 info->mtd.priv = &info->nand;
1296 info->mtd.name = dev_name(&pdev->dev);
1297 info->mtd.owner = THIS_MODULE;
1298
Sukumar Ghoraid5ce2b62011-01-28 15:42:03 +05301299 info->nand.options = pdata->devsize;
Vimal Singh2f70a1e2010-02-15 10:03:33 -08001300 info->nand.options |= NAND_SKIP_BBTSCAN;
Vimal Singh67ce04b2009-05-12 13:47:03 -07001301
1302 /* NAND write protect off */
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +00001303 gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001304
1305 if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
1306 pdev->dev.driver->name)) {
1307 err = -EBUSY;
Vimal Singh2f70a1e2010-02-15 10:03:33 -08001308 goto out_free_info;
Vimal Singh67ce04b2009-05-12 13:47:03 -07001309 }
1310
1311 info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
1312 if (!info->nand.IO_ADDR_R) {
1313 err = -ENOMEM;
1314 goto out_release_mem_region;
1315 }
vimal singh59e9c5a2009-07-13 16:26:24 +05301316
Vimal Singh67ce04b2009-05-12 13:47:03 -07001317 info->nand.controller = &info->controller;
1318
1319 info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
1320 info->nand.cmd_ctrl = omap_hwcontrol;
1321
Vimal Singh67ce04b2009-05-12 13:47:03 -07001322 /*
1323 * If RDY/BSY line is connected to OMAP then use the omap ready
1324 * funcrtion and the generic nand_wait function which reads the status
1325 * register after monitoring the RDY/BSY line.Otherwise use a standard
1326 * chip delay which is slightly more than tR (AC Timing) of the NAND
1327 * device and read status register until you get a failure or success
1328 */
1329 if (pdata->dev_ready) {
1330 info->nand.dev_ready = omap_dev_ready;
1331 info->nand.chip_delay = 0;
1332 } else {
1333 info->nand.waitfunc = omap_wait;
1334 info->nand.chip_delay = 50;
1335 }
1336
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301337 switch (pdata->xfer_type) {
1338 case NAND_OMAP_PREFETCH_POLLED:
vimal singh59e9c5a2009-07-13 16:26:24 +05301339 info->nand.read_buf = omap_read_buf_pref;
1340 info->nand.write_buf = omap_write_buf_pref;
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301341 break;
vimal singhdfe32892009-07-13 16:29:16 +05301342
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301343 case NAND_OMAP_POLLED:
vimal singh59e9c5a2009-07-13 16:26:24 +05301344 if (info->nand.options & NAND_BUSWIDTH_16) {
1345 info->nand.read_buf = omap_read_buf16;
1346 info->nand.write_buf = omap_write_buf16;
1347 } else {
1348 info->nand.read_buf = omap_read_buf8;
1349 info->nand.write_buf = omap_write_buf8;
1350 }
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301351 break;
1352
1353 case NAND_OMAP_PREFETCH_DMA:
Russell King763e7352012-04-25 00:16:00 +01001354 dma_cap_zero(mask);
1355 dma_cap_set(DMA_SLAVE, mask);
1356 sig = OMAP24XX_DMA_GPMC;
1357 info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
1358 if (!info->dma) {
Russell King2df41d02012-04-25 00:19:39 +01001359 dev_err(&pdev->dev, "DMA engine request failed\n");
1360 err = -ENXIO;
1361 goto out_release_mem_region;
Russell King763e7352012-04-25 00:16:00 +01001362 } else {
1363 struct dma_slave_config cfg;
Russell King763e7352012-04-25 00:16:00 +01001364
1365 memset(&cfg, 0, sizeof(cfg));
1366 cfg.src_addr = info->phys_base;
1367 cfg.dst_addr = info->phys_base;
1368 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1369 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1370 cfg.src_maxburst = 16;
1371 cfg.dst_maxburst = 16;
Arnd Bergmannd680e2c2012-08-04 11:05:25 +00001372 err = dmaengine_slave_config(info->dma, &cfg);
1373 if (err) {
Russell King763e7352012-04-25 00:16:00 +01001374 dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
Arnd Bergmannd680e2c2012-08-04 11:05:25 +00001375 err);
Russell King763e7352012-04-25 00:16:00 +01001376 goto out_release_mem_region;
1377 }
1378 info->nand.read_buf = omap_read_buf_dma_pref;
1379 info->nand.write_buf = omap_write_buf_dma_pref;
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301380 }
1381 break;
1382
Sukumar Ghorai4e070372011-01-28 15:42:06 +05301383 case NAND_OMAP_PREFETCH_IRQ:
1384 err = request_irq(pdata->gpmc_irq,
1385 omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
1386 if (err) {
1387 dev_err(&pdev->dev, "requesting irq(%d) error:%d",
1388 pdata->gpmc_irq, err);
1389 goto out_release_mem_region;
1390 } else {
1391 info->gpmc_irq = pdata->gpmc_irq;
1392 info->nand.read_buf = omap_read_buf_irq_pref;
1393 info->nand.write_buf = omap_write_buf_irq_pref;
1394 }
1395 break;
1396
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301397 default:
1398 dev_err(&pdev->dev,
1399 "xfer_type(%d) not supported!\n", pdata->xfer_type);
1400 err = -EINVAL;
1401 goto out_release_mem_region;
vimal singh59e9c5a2009-07-13 16:26:24 +05301402 }
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301403
vimal singh59e9c5a2009-07-13 16:26:24 +05301404 info->nand.verify_buf = omap_verify_buf;
1405
Sukumar Ghoraif3d73f32011-01-28 15:42:08 +05301406 /* selsect the ecc type */
1407 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT)
1408 info->nand.ecc.mode = NAND_ECC_SOFT;
Sukumar Ghoraif040d332011-01-28 15:42:09 +05301409 else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) ||
1410 (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) {
Sukumar Ghoraif3d73f32011-01-28 15:42:08 +05301411 info->nand.ecc.bytes = 3;
1412 info->nand.ecc.size = 512;
Mike Dunn6a918ba2012-03-11 14:21:11 -07001413 info->nand.ecc.strength = 1;
Sukumar Ghoraif3d73f32011-01-28 15:42:08 +05301414 info->nand.ecc.calculate = omap_calculate_ecc;
1415 info->nand.ecc.hwctl = omap_enable_hwecc;
1416 info->nand.ecc.correct = omap_correct_data;
1417 info->nand.ecc.mode = NAND_ECC_HW;
Ivan Djelic0e618ef2012-04-30 12:17:18 +02001418 } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
1419 (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
1420 err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
1421 if (err) {
1422 err = -EINVAL;
1423 goto out_release_mem_region;
1424 }
Sukumar Ghoraif3d73f32011-01-28 15:42:08 +05301425 }
Vimal Singh67ce04b2009-05-12 13:47:03 -07001426
1427 /* DIP switches on some boards change between 8 and 16 bit
1428 * bus widths for flash. Try the other width if the first try fails.
1429 */
Jan Weitzela80f1c12011-04-19 16:15:34 +02001430 if (nand_scan_ident(&info->mtd, 1, NULL)) {
Vimal Singh67ce04b2009-05-12 13:47:03 -07001431 info->nand.options ^= NAND_BUSWIDTH_16;
Jan Weitzela80f1c12011-04-19 16:15:34 +02001432 if (nand_scan_ident(&info->mtd, 1, NULL)) {
Vimal Singh67ce04b2009-05-12 13:47:03 -07001433 err = -ENXIO;
1434 goto out_release_mem_region;
1435 }
1436 }
1437
Sukumar Ghoraif040d332011-01-28 15:42:09 +05301438 /* rom code layout */
1439 if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) {
1440
1441 if (info->nand.options & NAND_BUSWIDTH_16)
1442 offset = 2;
1443 else {
1444 offset = 1;
1445 info->nand.badblock_pattern = &bb_descrip_flashbased;
1446 }
1447 omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16);
1448 for (i = 0; i < omap_oobinfo.eccbytes; i++)
1449 omap_oobinfo.eccpos[i] = i+offset;
1450
1451 omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes;
1452 omap_oobinfo.oobfree->length = info->mtd.oobsize -
1453 (offset + omap_oobinfo.eccbytes);
1454
1455 info->nand.ecc.layout = &omap_oobinfo;
Ivan Djelic0e618ef2012-04-30 12:17:18 +02001456 } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
1457 (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
1458 /* build OOB layout for BCH ECC correction */
1459 err = omap3_init_bch_tail(&info->mtd);
1460 if (err) {
1461 err = -EINVAL;
1462 goto out_release_mem_region;
1463 }
Sukumar Ghoraif040d332011-01-28 15:42:09 +05301464 }
Sukumar Ghorai1b0b323c2011-01-28 15:42:04 +05301465
Jan Weitzela80f1c12011-04-19 16:15:34 +02001466 /* second phase scan */
1467 if (nand_scan_tail(&info->mtd)) {
1468 err = -ENXIO;
1469 goto out_release_mem_region;
1470 }
1471
Artem Bityutskiy42d7fbe2012-03-09 19:24:26 +02001472 mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts,
1473 pdata->nr_parts);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001474
1475 platform_set_drvdata(pdev, &info->mtd);
1476
1477 return 0;
1478
1479out_release_mem_region:
Russell King763e7352012-04-25 00:16:00 +01001480 if (info->dma)
1481 dma_release_channel(info->dma);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001482 release_mem_region(info->phys_base, NAND_IO_SIZE);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001483out_free_info:
1484 kfree(info);
1485
1486 return err;
1487}
1488
1489static int omap_nand_remove(struct platform_device *pdev)
1490{
1491 struct mtd_info *mtd = platform_get_drvdata(pdev);
Vimal Singhf35b6ed2010-01-05 16:01:08 +05301492 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1493 mtd);
Ivan Djelic0e618ef2012-04-30 12:17:18 +02001494 omap3_free_bch(&info->mtd);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001495
1496 platform_set_drvdata(pdev, NULL);
Russell King763e7352012-04-25 00:16:00 +01001497 if (info->dma)
1498 dma_release_channel(info->dma);
1499
Sukumar Ghorai4e070372011-01-28 15:42:06 +05301500 if (info->gpmc_irq)
1501 free_irq(info->gpmc_irq, info);
1502
Vimal Singh67ce04b2009-05-12 13:47:03 -07001503 /* Release NAND device, its internal structures and partitions */
1504 nand_release(&info->mtd);
Sukumar Ghorai2c01946c2010-07-09 09:14:45 +00001505 iounmap(info->nand.IO_ADDR_R);
Vimal Singh67ce04b2009-05-12 13:47:03 -07001506 kfree(&info->mtd);
1507 return 0;
1508}
1509
/* Platform-driver glue binding omap_nand_probe/remove to "omap2-nand" devices */
static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
1518
/* expands to module_init/module_exit registering omap_nand_driver */
module_platform_driver(omap_nand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");