/*
 * davinci_nand.c - NAND Flash Driver for DaVinci family chips
 *
 * Copyright © 2006 Texas Instruments.
 *
 * Port to 2.6.23 Copyright © 2008 by:
 *   Sander Huijsen <Shuijsen@optelecom-nkf.com>
 *   Troy Kisky <troy.kisky@boundarydevices.com>
 *   Dirk Behme <Dirk.Behme@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of.h>

#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>

/*
 * This is a device driver for the NAND flash controller found on the
 * various DaVinci family chips.  It handles up to four SoC chipselects,
 * and some flavors of secondary chipselect (e.g. based on A12) as used
 * with multichip packages.
 *
 * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
 * available on chips like the DM355 and OMAP-L137 and needed with the
 * more error-prone MLC NAND chips.
 *
 * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
 * outputs in a "wire-AND" configuration, with no per-chip signals.
 */
struct davinci_nand_info {
        struct nand_chip        chip;

        struct device           *dev;
        struct clk              *clk;

        bool                    is_readmode;

        void __iomem            *base;
        void __iomem            *vaddr;

        uint32_t                ioaddr;
        uint32_t                current_cs;

        uint32_t                mask_chipsel;
        uint32_t                mask_ale;
        uint32_t                mask_cle;

        uint32_t                core_chipsel;

        struct davinci_aemif_timing     *timing;
};

static DEFINE_SPINLOCK(davinci_nand_lock);
static bool ecc4_busy;

static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
{
        return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
}

static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
                int offset)
{
        return __raw_readl(info->base + offset);
}

static inline void davinci_nand_writel(struct davinci_nand_info *info,
                int offset, unsigned long value)
{
        __raw_writel(value, info->base + offset);
}
/*----------------------------------------------------------------------*/

/*
 * Access to hardware control lines:  ALE, CLE, secondary chipselect.
 */

static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
                                   unsigned int ctrl)
{
        struct davinci_nand_info        *info = to_davinci_nand(mtd);
        uint32_t                        addr = info->current_cs;
        struct nand_chip                *nand = mtd_to_nand(mtd);

        /* Did the control lines change? */
        if (ctrl & NAND_CTRL_CHANGE) {
                if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
                        addr |= info->mask_cle;
                else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
                        addr |= info->mask_ale;

                nand->IO_ADDR_W = (void __iomem __force *)addr;
        }

        if (cmd != NAND_CMD_NONE)
                iowrite8(cmd, nand->IO_ADDR_W);
}

static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
{
        struct davinci_nand_info        *info = to_davinci_nand(mtd);
        uint32_t                        addr = info->ioaddr;

        /* maybe kick in a second chipselect */
        if (chip > 0)
                addr |= info->mask_chipsel;
        info->current_cs = addr;

        info->chip.IO_ADDR_W = (void __iomem __force *)addr;
        info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
}

/*----------------------------------------------------------------------*/

/*
 * 1-bit hardware ECC ... context maintained for each core chipselect
 */

static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);

        return davinci_nand_readl(info, NANDF1ECC_OFFSET
                        + 4 * info->core_chipsel);
}

static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
{
        struct davinci_nand_info *info;
        uint32_t nandcfr;
        unsigned long flags;

        info = to_davinci_nand(mtd);

        /* Reset ECC hardware */
        nand_davinci_readecc_1bit(mtd);

        spin_lock_irqsave(&davinci_nand_lock, flags);

        /* Restart ECC hardware */
        nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
        nandcfr |= BIT(8 + info->core_chipsel);
        davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);

        spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/*
 * Read hardware ECC value and pack into three bytes
 */
static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
                                       const u_char *dat, u_char *ecc_code)
{
        unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
        unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);

        /* invert so that erased block ecc is correct */
        ecc24 = ~ecc24;
        ecc_code[0] = (u_char)(ecc24);
        ecc_code[1] = (u_char)(ecc24 >> 8);
        ecc_code[2] = (u_char)(ecc24 >> 16);

        return 0;
}

static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
                                     u_char *read_ecc, u_char *calc_ecc)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
                                          (read_ecc[2] << 16);
        uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
                                          (calc_ecc[2] << 16);
        uint32_t diff = eccCalc ^ eccNand;

        if (diff) {
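                /*
                 * Roughly: diff is the XOR of the stored and freshly
                 * computed parity words.  If each of the twelve parity-bit
                 * pairs disagrees in exactly one half, a single data bit
                 * flipped, and the upper half of diff encodes its location:
                 * byte offset in the top nine bits, bit-within-byte in the
                 * low three.
                 */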
                if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
                        /* Correctable error */
                        if ((diff >> (12 + 3)) < chip->ecc.size) {
                                dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
                                return 1;
                        } else {
                                return -EBADMSG;
                        }
                } else if (!(diff & (diff - 1))) {
                        /* Single bit ECC error in the ECC itself,
                         * nothing to fix */
                        return 1;
                } else {
                        /* Uncorrectable error */
                        return -EBADMSG;
                }

        }
        return 0;
}

/*----------------------------------------------------------------------*/

/*
 * 4-bit hardware ECC ... context maintained over entire AEMIF
 *
 * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
 * since that forces use of a problematic "infix OOB" layout.
 * Among other things, it trashes manufacturer bad block markers.
 * Also, and specific to this hardware, it ECC-protects the "prepad"
 * in the OOB ... while having ECC protection for parts of OOB would
 * seem useful, the current MTD stack sometimes wants to update the
 * OOB without recomputing ECC.
 */

static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);
        unsigned long flags;
        u32 val;

        /* Reset ECC hardware */
        davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);

        spin_lock_irqsave(&davinci_nand_lock, flags);

        /* Start 4-bit ECC calculation for read/write */
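        /* (Bits 5:4 of NANDFCR appear to select which chipselect the single
         * 4-bit engine serves; BIT(12) kicks off the calculation.)
         */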
        val = davinci_nand_readl(info, NANDFCR_OFFSET);
        val &= ~(0x03 << 4);
        val |= (info->core_chipsel << 4) | BIT(12);
        davinci_nand_writel(info, NANDFCR_OFFSET, val);

        info->is_readmode = (mode == NAND_ECC_READ);

        spin_unlock_irqrestore(&davinci_nand_lock, flags);
}

/* Read raw ECC code after writing to NAND. */
static void
nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
{
        const u32 mask = 0x03ff03ff;

        code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
        code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
        code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
        code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}

/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
                                       const u_char *dat, u_char *ecc_code)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);
        u32 raw_ecc[4], *p;
        unsigned i;

        /* After a read, terminate ECC calculation by a dummy read
         * of some 4-bit ECC register.  ECC covers everything that
         * was read; correct() just uses the hardware state, so
         * ecc_code is not needed.
         */
        if (info->is_readmode) {
                davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
                return 0;
        }

        /* Pack eight raw 10-bit ecc values into ten bytes, making
         * two passes which each convert four values (in upper and
         * lower halves of two 32-bit words) into five bytes.  The
         * ROM boot loader uses this same packing scheme.
         */
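        /* Resulting layout per 5-byte group, with e0..e3 the consecutive
         * 10-bit values: byte0 = e0[7:0], byte1 = e1[5:0]:e0[9:8],
         * byte2 = e2[3:0]:e1[9:6], byte3 = e3[1:0]:e2[9:4], byte4 = e3[9:2],
         * i.e. the values are packed LSB-first.
         */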
        nand_davinci_readecc_4bit(info, raw_ecc);
        for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
                *ecc_code++ = p[0] & 0xff;
                *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
                *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
                *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
                *ecc_code++ = (p[1] >> 18) & 0xff;
        }

        return 0;
}

/* Correct up to 4 bits in data we just read, using state left in the
 * hardware plus the ecc_code computed when it was first written.
 */
static int nand_davinci_correct_4bit(struct mtd_info *mtd,
                u_char *data, u_char *ecc_code, u_char *null)
{
        int i;
        struct davinci_nand_info *info = to_davinci_nand(mtd);
        unsigned short ecc10[8];
        unsigned short *ecc16;
        u32 syndrome[4];
        u32 ecc_state;
        unsigned num_errors, corrected;
        unsigned long timeo;

        /* Unpack ten bytes into eight 10 bit values.  We know we're
         * little-endian, and use type punning for less shifting/masking.
         */
        if (WARN_ON(0x01 & (unsigned) ecc_code))
                return -EINVAL;
        ecc16 = (unsigned short *)ecc_code;

        ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
        ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
        ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
        ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
        ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
        ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
        ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
        ecc10[7] = (ecc16[4] >> 6) & 0x3ff;

        /* Tell ECC controller about the expected ECC codes. */
        for (i = 7; i >= 0; i--)
                davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

        /* Allow time for syndrome calculation ... then read it.
         * A syndrome of all zeroes means no detected errors.
         */
        davinci_nand_readl(info, NANDFSR_OFFSET);
        nand_davinci_readecc_4bit(info, syndrome);
        if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
                return 0;

        /*
         * Clear any previous address calculation by doing a dummy read of an
         * error address register.
         */
        davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

        /* Start address calculation, and wait for it to complete.
         * We _could_ start reading more data while this is working,
         * to speed up the overall page read.
         */
        davinci_nand_writel(info, NANDFCR_OFFSET,
                        davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));

        /*
         * ECC_STATE field reads 0x3 (Error correction complete) immediately
         * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
         * begin trying to poll for the state, you may fall right out of your
         * loop without any of the correction calculations having taken place.
         * The recommendation from the hardware team is to initially delay as
         * long as ECC_STATE reads less than 4. After that, ECC HW has entered
         * correction state.
         */
        timeo = jiffies + usecs_to_jiffies(100);
        do {
                ecc_state = (davinci_nand_readl(info,
                                NANDFSR_OFFSET) >> 8) & 0x0f;
                cpu_relax();
        } while ((ecc_state < 4) && time_before(jiffies, timeo));

        for (;;) {
                u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

                switch ((fsr >> 8) & 0x0f) {
                case 0:         /* no error, should not happen */
                        davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
                        return 0;
                case 1:         /* five or more errors detected */
                        davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
                        return -EBADMSG;
                case 2:         /* error addresses computed */
                case 3:
                        num_errors = 1 + ((fsr >> 16) & 0x03);
                        goto correct;
                default:        /* still working on it */
                        cpu_relax();
                        continue;
                }
        }

correct:
        /* correct each error */
        for (i = 0, corrected = 0; i < num_errors; i++) {
                int error_address, error_value;

                if (i > 1) {
                        error_address = davinci_nand_readl(info,
                                                NAND_ERR_ADD2_OFFSET);
                        error_value = davinci_nand_readl(info,
                                                NAND_ERR_ERRVAL2_OFFSET);
                } else {
                        error_address = davinci_nand_readl(info,
                                                NAND_ERR_ADD1_OFFSET);
                        error_value = davinci_nand_readl(info,
                                                NAND_ERR_ERRVAL1_OFFSET);
                }

                if (i & 1) {
                        error_address >>= 16;
                        error_value >>= 16;
                }
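                /* The controller seems to report locations counted from the
                 * end of the protected region; (512 + 7) - error_address
                 * turns that into a byte offset from the start of the data.
                 * Offsets of 512 and above fall in the ECC bytes themselves,
                 * so only errors inside the 512-byte payload get patched.
                 */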
                error_address &= 0x3ff;
                error_address = (512 + 7) - error_address;

                if (error_address < 512) {
                        data[error_address] ^= error_value;
                        corrected++;
                }
        }

        return corrected;
}

/*----------------------------------------------------------------------*/

/*
 * NOTE:  NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
 * how these chips are normally wired.  This translates to both 8 and 16
 * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
 *
 * For now we assume that configuration, or any other one which ignores
 * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
 * and have that transparently morphed into multiple NAND operations.
 */
static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
        struct nand_chip *chip = mtd_to_nand(mtd);

        if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
                ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
        else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
                ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
        else
                ioread8_rep(chip->IO_ADDR_R, buf, len);
}

static void nand_davinci_write_buf(struct mtd_info *mtd,
                                   const uint8_t *buf, int len)
{
        struct nand_chip *chip = mtd_to_nand(mtd);

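        /* IO_ADDR_R and IO_ADDR_W are set to the same address in
         * nand_davinci_select_chip(), so writing through IO_ADDR_R here
         * still reaches the intended data register.
         */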
        if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
                iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
        else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
                iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
        else
                iowrite8_rep(chip->IO_ADDR_R, buf, len);
}

/*
 * Check hardware register for wait status. Returns 1 if device is ready,
 * 0 if it is still busy.
 */
static int nand_davinci_dev_ready(struct mtd_info *mtd)
{
        struct davinci_nand_info *info = to_davinci_nand(mtd);

        return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
}

/*----------------------------------------------------------------------*/

/* An ECC layout for using 4-bit ECC with small-page flash, storing
 * ten ECC bytes plus the manufacturer's bad block marker byte, and
 * not overlapping the default BBT markers.
 */
static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
                                      struct mtd_oob_region *oobregion)
{
        if (section > 2)
                return -ERANGE;

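        /* The ten ECC bytes are split around byte 5, which small-page
         * chips use for the factory bad block marker (see the comment
         * above this function).
         */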
        if (!section) {
                oobregion->offset = 0;
                oobregion->length = 5;
        } else if (section == 1) {
                oobregion->offset = 6;
                oobregion->length = 2;
        } else {
                oobregion->offset = 13;
                oobregion->length = 3;
        }

        return 0;
}

static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
                                       struct mtd_oob_region *oobregion)
{
        if (section > 1)
                return -ERANGE;

        if (!section) {
                oobregion->offset = 8;
                oobregion->length = 5;
        } else {
                oobregion->offset = 16;
                oobregion->length = mtd->oobsize - 16;
        }

        return 0;
}

static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
        .ecc = hwecc4_ooblayout_small_ecc,
        .free = hwecc4_ooblayout_small_free,
};

#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
        {.compatible = "ti,davinci-nand", },
        {.compatible = "ti,keystone-nand", },
        {},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);

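/*
 * Illustrative node only; the authoritative binding lives in the DT
 * documentation, and the unit address and property values below are
 * made up:
 *
 *      nand@0 {
 *              compatible = "ti,davinci-nand";
 *              ti,davinci-chipselect = <0>;
 *              ti,davinci-mask-ale = <0>;
 *              ti,davinci-mask-cle = <0>;
 *              ti,davinci-mask-chipsel = <0>;
 *              ti,davinci-ecc-mode = "hw";
 *              ti,davinci-ecc-bits = <4>;
 *              ti,davinci-nand-use-bbt;
 *      };
 */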
static struct davinci_nand_pdata
        *nand_davinci_get_pdata(struct platform_device *pdev)
{
        if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
                struct davinci_nand_pdata *pdata;
                const char *mode;
                u32 prop;

                pdata = devm_kzalloc(&pdev->dev,
                                sizeof(struct davinci_nand_pdata),
                                GFP_KERNEL);
                pdev->dev.platform_data = pdata;
                if (!pdata)
                        return ERR_PTR(-ENOMEM);
                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-chipselect", &prop))
                        pdev->id = prop;
                else
                        return ERR_PTR(-EINVAL);

                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-mask-ale", &prop))
                        pdata->mask_ale = prop;
                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-mask-cle", &prop))
                        pdata->mask_cle = prop;
                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-mask-chipsel", &prop))
                        pdata->mask_chipsel = prop;
                if (!of_property_read_string(pdev->dev.of_node,
                        "ti,davinci-ecc-mode", &mode)) {
                        if (!strncmp("none", mode, 4))
                                pdata->ecc_mode = NAND_ECC_NONE;
                        if (!strncmp("soft", mode, 4))
                                pdata->ecc_mode = NAND_ECC_SOFT;
                        if (!strncmp("hw", mode, 2))
                                pdata->ecc_mode = NAND_ECC_HW;
                }
                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-ecc-bits", &prop))
                        pdata->ecc_bits = prop;

                if (!of_property_read_u32(pdev->dev.of_node,
                        "ti,davinci-nand-buswidth", &prop) && prop == 16)
                        pdata->options |= NAND_BUSWIDTH_16;

                if (of_property_read_bool(pdev->dev.of_node,
                        "ti,davinci-nand-use-bbt"))
                        pdata->bbt_options = NAND_BBT_USE_FLASH;

                /*
                 * Since kernel v4.8, this driver has been fixed to enable
                 * use of 4-bit hardware ECC with subpages and verified on
                 * TI's keystone EVMs (K2L, K2HK and K2E).
                 * However, in the interest of not breaking systems using
                 * existing UBI partitions, sub-page writes are not being
                 * (re)enabled. If you want to use subpage writes on Keystone
                 * platforms (i.e. do not have any existing UBI partitions),
                 * then use "ti,davinci-nand" as the compatible in your
                 * device-tree file.
                 */
                if (of_device_is_compatible(pdev->dev.of_node,
                                            "ti,keystone-nand")) {
                        pdata->options |= NAND_NO_SUBPAGE_WRITE;
                }
        }

        return dev_get_platdata(&pdev->dev);
}
#else
static struct davinci_nand_pdata
        *nand_davinci_get_pdata(struct platform_device *pdev)
{
        return dev_get_platdata(&pdev->dev);
}
#endif

static int nand_davinci_probe(struct platform_device *pdev)
{
        struct davinci_nand_pdata *pdata;
        struct davinci_nand_info *info;
        struct resource *res1;
        struct resource *res2;
        void __iomem *vaddr;
        void __iomem *base;
        int ret;
        uint32_t val;
        struct mtd_info *mtd;

        pdata = nand_davinci_get_pdata(pdev);
        if (IS_ERR(pdata))
                return PTR_ERR(pdata);

        /* insist on board-specific configuration */
        if (!pdata)
                return -ENODEV;

        /* which external chipselect will we be managing? */
        if (pdev->id < 0 || pdev->id > 3)
                return -ENODEV;

        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        platform_set_drvdata(pdev, info);

        res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res1 || !res2) {
                dev_err(&pdev->dev, "resource missing\n");
                return -EINVAL;
        }

        vaddr = devm_ioremap_resource(&pdev->dev, res1);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        /*
         * This register range is used to set up the NAND controller. When
         * the TI AEMIF driver is in use, the same memory address range has
         * already been requested by AEMIF, so we cannot request it twice;
         * just ioremap it. The AEMIF and NAND drivers do not use the same
         * registers within this range.
         */
        base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
        if (!base) {
                dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
                return -EADDRNOTAVAIL;
        }

        info->dev = &pdev->dev;
        info->base = base;
        info->vaddr = vaddr;

        mtd = nand_to_mtd(&info->chip);
        mtd->dev.parent = &pdev->dev;
        nand_set_flash_node(&info->chip, pdev->dev.of_node);

        info->chip.IO_ADDR_R = vaddr;
        info->chip.IO_ADDR_W = vaddr;
        info->chip.chip_delay = 0;
        info->chip.select_chip = nand_davinci_select_chip;

        /* options such as NAND_BBT_USE_FLASH */
        info->chip.bbt_options = pdata->bbt_options;
        /* options such as 16-bit widths */
        info->chip.options = pdata->options;
        info->chip.bbt_td = pdata->bbt_td;
        info->chip.bbt_md = pdata->bbt_md;
        info->timing = pdata->timing;

        info->ioaddr = (uint32_t __force) vaddr;

        info->current_cs = info->ioaddr;
        info->core_chipsel = pdev->id;
        info->mask_chipsel = pdata->mask_chipsel;

        /* use nandboot-capable ALE/CLE masks by default */
        info->mask_ale = pdata->mask_ale ? : MASK_ALE;
        info->mask_cle = pdata->mask_cle ? : MASK_CLE;

        /* Set address of hardware control function */
        info->chip.cmd_ctrl = nand_davinci_hwcontrol;
        info->chip.dev_ready = nand_davinci_dev_ready;

        /* Speed up buffer I/O */
        info->chip.read_buf = nand_davinci_read_buf;
        info->chip.write_buf = nand_davinci_write_buf;

        /* Use board-specific ECC config */
        info->chip.ecc.mode = pdata->ecc_mode;

        ret = -EINVAL;

        info->clk = devm_clk_get(&pdev->dev, "aemif");
        if (IS_ERR(info->clk)) {
                ret = PTR_ERR(info->clk);
                dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
                return ret;
        }

        ret = clk_prepare_enable(info->clk);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
                        ret);
                goto err_clk_enable;
        }

        spin_lock_irq(&davinci_nand_lock);

        /* put CSxNAND into NAND mode */
        val = davinci_nand_readl(info, NANDFCR_OFFSET);
        val |= BIT(info->core_chipsel);
        davinci_nand_writel(info, NANDFCR_OFFSET, val);

        spin_unlock_irq(&davinci_nand_lock);

        /* Scan to find existence of the device(s) */
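        /* (Probe a second chip only when a secondary chipselect mask was
         * supplied by the board or DT data.)
         */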
        ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
        if (ret < 0) {
                dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
                goto err;
        }

        switch (info->chip.ecc.mode) {
        case NAND_ECC_NONE:
                pdata->ecc_bits = 0;
                break;
        case NAND_ECC_SOFT:
                pdata->ecc_bits = 0;
                /*
                 * This driver expects Hamming based ECC when ecc_mode is set
                 * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
                 * avoid adding an extra ->ecc_algo field to
                 * davinci_nand_pdata.
                 */
                info->chip.ecc.algo = NAND_ECC_HAMMING;
                break;
        case NAND_ECC_HW:
                if (pdata->ecc_bits == 4) {
                        /* No sanity checks:  CPUs must support this,
                         * and the chips may not use NAND_BUSWIDTH_16.
                         */

                        /* No sharing 4-bit hardware between chipselects yet */
                        spin_lock_irq(&davinci_nand_lock);
                        if (ecc4_busy)
                                ret = -EBUSY;
                        else
                                ecc4_busy = true;
                        spin_unlock_irq(&davinci_nand_lock);

                        if (ret == -EBUSY)
                                return ret;

                        info->chip.ecc.calculate = nand_davinci_calculate_4bit;
                        info->chip.ecc.correct = nand_davinci_correct_4bit;
                        info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
                        info->chip.ecc.bytes = 10;
                        info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
                        info->chip.ecc.algo = NAND_ECC_BCH;
                } else {
                        /* 1-bit Hamming ECC */
                        info->chip.ecc.calculate = nand_davinci_calculate_1bit;
                        info->chip.ecc.correct = nand_davinci_correct_1bit;
                        info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
                        info->chip.ecc.bytes = 3;
                        info->chip.ecc.algo = NAND_ECC_HAMMING;
                }
                info->chip.ecc.size = 512;
                info->chip.ecc.strength = pdata->ecc_bits;
                break;
        default:
                return -EINVAL;
        }

        /* Update ECC layout if needed ... for 1-bit HW ECC, the default
         * is OK, but it allocates 6 bytes when only 3 are needed (for
         * each 512 bytes). For the 4-bit HW ECC, that default is not
         * usable: 10 bytes are needed, not 6.
         */
        if (pdata->ecc_bits == 4) {
                int chunks = mtd->writesize / 512;

                if (!chunks || mtd->oobsize < 16) {
                        dev_dbg(&pdev->dev, "too small\n");
                        ret = -EINVAL;
                        goto err;
                }

                /* For small page chips, preserve the manufacturer's
                 * badblock marking data ... and make sure a flash BBT
                 * table marker fits in the free bytes.
                 */
                if (chunks == 1) {
                        mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
                } else if (chunks == 4 || chunks == 8) {
                        mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
                        info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
                } else {
                        ret = -EIO;
                        goto err;
                }
        }

        ret = nand_scan_tail(mtd);
        if (ret < 0)
                goto err;

        if (pdata->parts)
                ret = mtd_device_parse_register(mtd, NULL, NULL,
                                        pdata->parts, pdata->nr_parts);
        else
                ret = mtd_device_register(mtd, NULL, 0);
        if (ret < 0)
                goto err;

        val = davinci_nand_readl(info, NRCSR_OFFSET);
        dev_info(&pdev->dev, "controller rev. %d.%d\n",
                 (val >> 8) & 0xff, val & 0xff);

        return 0;

err:
        clk_disable_unprepare(info->clk);

err_clk_enable:
        spin_lock_irq(&davinci_nand_lock);
        if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
                ecc4_busy = false;
        spin_unlock_irq(&davinci_nand_lock);
        return ret;
}

static int nand_davinci_remove(struct platform_device *pdev)
{
        struct davinci_nand_info *info = platform_get_drvdata(pdev);

        spin_lock_irq(&davinci_nand_lock);
        if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
                ecc4_busy = false;
        spin_unlock_irq(&davinci_nand_lock);

        nand_release(nand_to_mtd(&info->chip));

        clk_disable_unprepare(info->clk);

        return 0;
}

static struct platform_driver nand_davinci_driver = {
        .probe          = nand_davinci_probe,
        .remove         = nand_davinci_remove,
        .driver         = {
                .name   = "davinci_nand",
                .of_match_table = of_match_ptr(davinci_nand_of_match),
        },
};
MODULE_ALIAS("platform:davinci_nand");

module_platform_driver(nand_davinci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");