/*
 * MTK NAND Flash controller driver.
 * Copyright (C) 2016 MediaTek Inc.
 * Authors:	Xiaolei Li		<xiaolei.li@mediatek.com>
 *		Jorge Ramirez-Ortiz	<jorge.ramirez-ortiz@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include "mtk_ecc.h"

/* NAND controller register definition */
#define NFI_CNFG		(0x00)
#define		CNFG_AHB		BIT(0)
#define		CNFG_READ_EN		BIT(1)
#define		CNFG_DMA_BURST_EN	BIT(2)
#define		CNFG_BYTE_RW		BIT(6)
#define		CNFG_HW_ECC_EN		BIT(8)
#define		CNFG_AUTO_FMT_EN	BIT(9)
#define		CNFG_OP_CUST		(6 << 12)
#define NFI_PAGEFMT		(0x04)
#define		PAGEFMT_FDM_ECC_SHIFT	(12)
#define		PAGEFMT_FDM_SHIFT	(8)
#define		PAGEFMT_SEC_SEL_512	BIT(2)
#define		PAGEFMT_512_2K		(0)
#define		PAGEFMT_2K_4K		(1)
#define		PAGEFMT_4K_8K		(2)
#define		PAGEFMT_8K_16K		(3)
/* NFI control */
#define NFI_CON			(0x08)
#define		CON_FIFO_FLUSH		BIT(0)
#define		CON_NFI_RST		BIT(1)
#define		CON_BRD			BIT(8)  /* burst read */
#define		CON_BWR			BIT(9)	/* burst write */
#define		CON_SEC_SHIFT		(12)
/* Timing control register */
#define NFI_ACCCON		(0x0C)
#define NFI_INTR_EN		(0x10)
#define		INTR_AHB_DONE_EN	BIT(6)
#define NFI_INTR_STA		(0x14)
#define NFI_CMD			(0x20)
#define NFI_ADDRNOB		(0x30)
#define NFI_COLADDR		(0x34)
#define NFI_ROWADDR		(0x38)
#define NFI_STRDATA		(0x40)
#define		STAR_EN			(1)
#define		STAR_DE			(0)
#define NFI_CNRNB		(0x44)
#define NFI_DATAW		(0x50)
#define NFI_DATAR		(0x54)
#define NFI_PIO_DIRDY		(0x58)
#define		PIO_DI_RDY		(0x01)
#define NFI_STA			(0x60)
#define		STA_CMD			BIT(0)
#define		STA_ADDR		BIT(1)
#define		STA_BUSY		BIT(8)
#define		STA_EMP_PAGE		BIT(12)
#define		NFI_FSM_CUSTDATA	(0xe << 16)
#define		NFI_FSM_MASK		(0xf << 16)
#define NFI_ADDRCNTR		(0x70)
#define		CNTR_MASK		GENMASK(16, 12)
#define		ADDRCNTR_SEC_SHIFT	(12)
#define		ADDRCNTR_SEC(val) \
		(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
#define NFI_STRADDR		(0x80)
#define NFI_BYTELEN		(0x84)
#define NFI_CSEL		(0x90)
#define NFI_FDML(x)		(0xA0 + (x) * sizeof(u32) * 2)
#define NFI_FDMM(x)		(0xA4 + (x) * sizeof(u32) * 2)
#define NFI_FDM_MAX_SIZE	(8)
#define NFI_FDM_MIN_SIZE	(1)
#define NFI_MASTER_STA		(0x224)
#define		MASTER_STA_MASK		(0x0FFF)
#define NFI_EMPTY_THRESH	(0x23C)

#define MTK_NAME		"mtk-nand"
#define KB(x)			((x) * 1024UL)
#define MB(x)			(KB(x) * 1024UL)

#define MTK_TIMEOUT		(500000)
#define MTK_RESET_TIMEOUT	(1000000)
#define MTK_MAX_SECTOR		(16)
#define MTK_NAND_MAX_NSELS	(2)
#define MTK_NFC_MIN_SPARE	(16)

struct mtk_nfc_caps {
	const u8 *spare_size;
	u8 num_spare_size;
	u8 pageformat_spare_shift;
};

struct mtk_nfc_bad_mark_ctl {
	void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
	u32 sec;
	u32 pos;
};

/*
 * FDM: region used to store free OOB data
 */
struct mtk_nfc_fdm {
	u32 reg_size;
	u32 ecc_size;
};

struct mtk_nfc_nand_chip {
	struct list_head node;
	struct nand_chip nand;

	struct mtk_nfc_bad_mark_ctl bad_mark;
	struct mtk_nfc_fdm fdm;
	u32 spare_per_sector;

	int nsels;
	u8 sels[0];
	/* nothing after this field */
};

struct mtk_nfc_clk {
	struct clk *nfi_clk;
	struct clk *pad_clk;
};

struct mtk_nfc {
	struct nand_hw_control controller;
	struct mtk_ecc_config ecc_cfg;
	struct mtk_nfc_clk clk;
	struct mtk_ecc *ecc;

	struct device *dev;
	const struct mtk_nfc_caps *caps;
	void __iomem *regs;

	struct completion done;
	struct list_head chips;

	u8 *buffer;
};

/*
 * supported spare sizes of each IP.
 * the order must match the spare size bitfield definition of the
 * NFI_PAGEFMT register.
 */
static const u8 spare_size_mt2701[] = {
	16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 63, 64
};

static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
	return container_of(nand, struct mtk_nfc_nand_chip, nand);
}

static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
{
	return (u8 *)p + i * chip->ecc.size;
}

static inline u8 *oob_ptr(struct nand_chip *chip, int i)
{
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	u8 *poi;

	/* map the sector's FDM data to free oob:
	 * the beginning of the oob area stores the FDM data of the bad mark
	 * sector
	 */

	if (i < mtk_nand->bad_mark.sec)
		poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
	else if (i == mtk_nand->bad_mark.sec)
		poi = chip->oob_poi;
	else
		poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;

	return poi;
}

static inline int mtk_data_len(struct nand_chip *chip)
{
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);

	return chip->ecc.size + mtk_nand->spare_per_sector;
}

static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);

	return nfc->buffer + i * mtk_data_len(chip);
}

static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);

	return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
}

static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
{
	writel(val, nfc->regs + reg);
}

static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
{
	writew(val, nfc->regs + reg);
}

static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
{
	writeb(val, nfc->regs + reg);
}

static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
{
	return readl_relaxed(nfc->regs + reg);
}

static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
{
	return readw_relaxed(nfc->regs + reg);
}

static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
{
	return readb_relaxed(nfc->regs + reg);
}

static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
{
	struct device *dev = nfc->dev;
	u32 val;
	int ret;

	/* reset all registers and force the NFI master to terminate */
	nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);

	/* wait for the master to finish the last transaction */
	ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
				 !(val & MASTER_STA_MASK), 50,
				 MTK_RESET_TIMEOUT);
	if (ret)
		dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
			 NFI_MASTER_STA, val);

	/* ensure any status register affected by the NFI master is reset */
	nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
	nfi_writew(nfc, STAR_DE, NFI_STRDATA);
}

static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
{
	struct device *dev = nfc->dev;
	u32 val;
	int ret;

	nfi_writel(nfc, command, NFI_CMD);

	ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
					!(val & STA_CMD), 10, MTK_TIMEOUT);
	if (ret) {
		dev_warn(dev, "nfi core timed out entering command mode\n");
		return -EIO;
	}

	return 0;
}

static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
{
	struct device *dev = nfc->dev;
	u32 val;
	int ret;

	nfi_writel(nfc, addr, NFI_COLADDR);
	nfi_writel(nfc, 0, NFI_ROWADDR);
	nfi_writew(nfc, 1, NFI_ADDRNOB);

	ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
					!(val & STA_ADDR), 10, MTK_TIMEOUT);
	if (ret) {
		dev_warn(dev, "nfi core timed out entering address mode\n");
		return -EIO;
	}

	return 0;
}

static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	u32 fmt, spare, i;

	if (!mtd->writesize)
		return 0;

	spare = mtk_nand->spare_per_sector;

	switch (mtd->writesize) {
	case 512:
		fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
		break;
	case KB(2):
		if (chip->ecc.size == 512)
			fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
		else
			fmt = PAGEFMT_512_2K;
		break;
	case KB(4):
		if (chip->ecc.size == 512)
			fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
		else
			fmt = PAGEFMT_2K_4K;
		break;
	case KB(8):
		if (chip->ecc.size == 512)
			fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
		else
			fmt = PAGEFMT_4K_8K;
		break;
	case KB(16):
		fmt = PAGEFMT_8K_16K;
		break;
	default:
		dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
		return -EINVAL;
	}

	/*
	 * the hardware will double the value for this eccsize, so we need to
	 * halve it
	 */
	if (chip->ecc.size == 1024)
		spare >>= 1;

	for (i = 0; i < nfc->caps->num_spare_size; i++) {
		if (nfc->caps->spare_size[i] == spare)
			break;
	}

	if (i == nfc->caps->num_spare_size) {
		dev_err(nfc->dev, "invalid spare size %d\n", spare);
		return -EINVAL;
	}

	fmt |= i << nfc->caps->pageformat_spare_shift;

	fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
	fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
	nfi_writel(nfc, fmt, NFI_PAGEFMT);

	nfc->ecc_cfg.strength = chip->ecc.strength;
	nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;

	return 0;
}

static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(nand);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);

	if (chip < 0)
		return;

	mtk_nfc_hw_runtime_config(mtd);

	nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
}

static int mtk_nfc_dev_ready(struct mtd_info *mtd)
{
	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));

	if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
		return 0;

	return 1;
}

static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));

	if (ctrl & NAND_ALE) {
		mtk_nfc_send_address(nfc, dat);
	} else if (ctrl & NAND_CLE) {
		mtk_nfc_hw_reset(nfc);

		nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
		mtk_nfc_send_command(nfc, dat);
	}
}

static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
{
	int rc;
	u8 val;

	rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
				       val & PIO_DI_RDY, 10, MTK_TIMEOUT);
	if (rc < 0)
		dev_err(nfc->dev, "data not ready\n");
}

static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	u32 reg;

	/* after each byte read, the NFI_STA reg is reset by the hardware */
	reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
	if (reg != NFI_FSM_CUSTDATA) {
		reg = nfi_readw(nfc, NFI_CNFG);
		reg |= CNFG_BYTE_RW | CNFG_READ_EN;
		nfi_writew(nfc, reg, NFI_CNFG);

		/*
		 * set to max sector to allow the HW to continue reading over
		 * unaligned accesses
		 */
		reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
		nfi_writel(nfc, reg, NFI_CON);

		/* trigger to fetch data */
		nfi_writew(nfc, STAR_EN, NFI_STRDATA);
	}

	mtk_nfc_wait_ioready(nfc);

	return nfi_readb(nfc, NFI_DATAR);
}

static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = mtk_nfc_read_byte(mtd);
}

static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
{
	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
	u32 reg;

	reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;

	if (reg != NFI_FSM_CUSTDATA) {
		reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
		nfi_writew(nfc, reg, NFI_CNFG);

		reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
		nfi_writel(nfc, reg, NFI_CON);

		nfi_writew(nfc, STAR_EN, NFI_STRDATA);
	}

	mtk_nfc_wait_ioready(nfc);
	nfi_writeb(nfc, byte, NFI_DATAW);
}

static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		mtk_nfc_write_byte(mtd, buf[i]);
}

static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	int size = chip->ecc.size + mtk_nand->fdm.reg_size;

	nfc->ecc_cfg.mode = ECC_DMA_MODE;
	nfc->ecc_cfg.op = ECC_ENCODE;

	return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
}

static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
{
	/* nop */
}

static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
	u32 bad_pos = nand->bad_mark.pos;

	if (raw)
		bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
	else
		bad_pos += nand->bad_mark.sec * chip->ecc.size;

	swap(chip->oob_poi[0], buf[bad_pos]);
}

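/*
 * Prepare nfc->buffer for a raw program of a partial page: the page data is
 * copied for every sector, while the FDM bytes and the ECC parity are only
 * filled in for the sectors covered by the [offset, offset + len) range.
 */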
static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
				  u32 len, const u8 *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 start, end;
	int i, ret;

	start = offset / chip->ecc.size;
	end = DIV_ROUND_UP(offset + len, chip->ecc.size);

	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
	for (i = 0; i < chip->ecc.steps; i++) {
		memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
		       chip->ecc.size);

		if (start > i || i >= end)
			continue;

		if (i == mtk_nand->bad_mark.sec)
			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

		memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);

		/* program the ECC parity back to the OOB */
		ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 i;

	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
	for (i = 0; i < chip->ecc.steps; i++) {
		if (buf)
			memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
			       chip->ecc.size);

		if (i == mtk_nand->bad_mark.sec)
			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

		memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
	}
}

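/*
 * Copy the FDM (free OOB) bytes of each decoded sector out of the
 * NFI_FDML/NFI_FDMM register pairs into the chip's oob_poi buffer.
 */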
static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
				    u32 sectors)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 vall, valm;
	u8 *oobptr;
	int i, j;

	for (i = 0; i < sectors; i++) {
		oobptr = oob_ptr(chip, start + i);
		vall = nfi_readl(nfc, NFI_FDML(i));
		valm = nfi_readl(nfc, NFI_FDMM(i));

		for (j = 0; j < fdm->reg_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
	}
}

static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 vall, valm;
	u8 *oobptr;
	int i, j;

	for (i = 0; i < chip->ecc.steps; i++) {
		oobptr = oob_ptr(chip, i);
		vall = 0;
		valm = 0;
		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
					<< (j * 8);
			else
				valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
					<< ((j - 4) * 8);
		}
		nfi_writel(nfc, vall, NFI_FDML(i));
		nfi_writel(nfc, valm, NFI_FDMM(i));
	}
}

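/*
 * Program one page: DMA the prepared buffer to the NFI, wait for the AHB
 * done interrupt, then poll the sector counter until all sectors are written.
 */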
static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				 const u8 *buf, int page, int len)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct device *dev = nfc->dev;
	dma_addr_t addr;
	u32 reg;
	int ret;

	addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
	ret = dma_mapping_error(nfc->dev, addr);
	if (ret) {
		dev_err(nfc->dev, "dma mapping error\n");
		return -EINVAL;
	}

	reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
	nfi_writew(nfc, reg, NFI_CNFG);

	nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);

	init_completion(&nfc->done);

	reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
	nfi_writel(nfc, reg, NFI_CON);
	nfi_writew(nfc, STAR_EN, NFI_STRDATA);

	ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
	if (!ret) {
		dev_err(dev, "program ahb done timeout\n");
		nfi_writew(nfc, 0, NFI_INTR_EN);
		ret = -ETIMEDOUT;
		goto timeout;
	}

	ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
					ADDRCNTR_SEC(reg) >= chip->ecc.steps,
					10, MTK_TIMEOUT);
	if (ret)
		dev_err(dev, "hwecc write timeout\n");

timeout:

	dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
	nfi_writel(nfc, 0, NFI_CON);

	return ret;
}

static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			      const u8 *buf, int page, int raw)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	size_t len;
	const u8 *bufpoi;
	u32 reg;
	int ret;

	if (!raw) {
		/* OOB => FDM: from register, ECC: from HW */
		reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
		nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);

		nfc->ecc_cfg.op = ECC_ENCODE;
		nfc->ecc_cfg.mode = ECC_NFI_MODE;
		ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
		if (ret) {
			/* clear NFI config */
			reg = nfi_readw(nfc, NFI_CNFG);
			reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
			nfi_writew(nfc, reg, NFI_CNFG);

			return ret;
		}

		memcpy(nfc->buffer, buf, mtd->writesize);
		mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
		bufpoi = nfc->buffer;

		/* write OOB into the FDM registers (OOB area in MTK NAND) */
		mtk_nfc_write_fdm(chip);
	} else {
		bufpoi = buf;
	}

	len = mtd->writesize + (raw ? mtd->oobsize : 0);
	ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);

	if (!raw)
		mtk_ecc_disable(nfc->ecc);

	return ret;
}

static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
				    struct nand_chip *chip, const u8 *buf,
				    int oob_on, int page)
{
	return mtk_nfc_write_page(mtd, chip, buf, page, 0);
}

static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				  const u8 *buf, int oob_on, int pg)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);

	mtk_nfc_format_page(mtd, buf);
	return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
}

static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
				       struct nand_chip *chip, u32 offset,
				       u32 data_len, const u8 *buf,
				       int oob_on, int page)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	int ret;

	ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
	if (ret < 0)
		return ret;

	/* use the data in the private buffer (now with FDM and ECC parity) */
	return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
}

static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
				 int page)
{
	int ret;

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);

	ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
	if (ret < 0)
		return -EIO;

	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	ret = chip->waitfunc(mtd, chip);

	return ret & NAND_STATUS_FAIL ? -EIO : 0;
}

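/*
 * If the controller flagged an empty (erased) page, report clean 0xff data
 * and OOB with no bitflips; otherwise fold the ECC engine statistics into
 * mtd->ecc_stats and return the bitflip count reported by the engine.
 */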
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_ecc_stats stats;
	int rc, i;

	rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
	if (rc) {
		memset(buf, 0xff, sectors * chip->ecc.size);
		for (i = 0; i < sectors; i++)
			memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
		return 0;
	}

	mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
	mtd->ecc_stats.corrected += stats.corrected;
	mtd->ecc_stats.failed += stats.failed;

	return stats.bitflips;
}

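/*
 * Core read path: DMA the requested sectors straight into bufpoi, either raw
 * or with the NFI/ECC engine decoding the data and extracting the FDM bytes.
 */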
static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
				u32 data_offs, u32 readlen,
				u8 *bufpoi, int page, int raw)
{
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	u32 spare = mtk_nand->spare_per_sector;
	u32 column, sectors, start, end, reg;
	dma_addr_t addr;
	int bitflips;
	size_t len;
	u8 *buf;
	int rc;

	start = data_offs / chip->ecc.size;
	end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);

	sectors = end - start;
	column = start * (chip->ecc.size + spare);

	len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
	buf = bufpoi + start * chip->ecc.size;

	if (column != 0)
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);

	addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
	rc = dma_mapping_error(nfc->dev, addr);
	if (rc) {
		dev_err(nfc->dev, "dma mapping error\n");

		return -EINVAL;
	}

	reg = nfi_readw(nfc, NFI_CNFG);
	reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
	if (!raw) {
		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
		nfi_writew(nfc, reg, NFI_CNFG);

		nfc->ecc_cfg.mode = ECC_NFI_MODE;
		nfc->ecc_cfg.sectors = sectors;
		nfc->ecc_cfg.op = ECC_DECODE;
		rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
		if (rc) {
			dev_err(nfc->dev, "ecc enable\n");
			/* clear NFI_CNFG */
			reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
				CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
			nfi_writew(nfc, reg, NFI_CNFG);
			dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);

			return rc;
		}
	} else {
		nfi_writew(nfc, reg, NFI_CNFG);
	}

	nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);

	init_completion(&nfc->done);
	reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
	nfi_writel(nfc, reg, NFI_CON);
	nfi_writew(nfc, STAR_EN, NFI_STRDATA);

	rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
	if (!rc)
		dev_warn(nfc->dev, "read ahb/dma done timeout\n");

	rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
				       ADDRCNTR_SEC(reg) >= sectors, 10,
				       MTK_TIMEOUT);
	if (rc < 0) {
		dev_err(nfc->dev, "subpage done timeout\n");
		bitflips = -EIO;
	} else {
		bitflips = 0;
		if (!raw) {
			rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
			bitflips = rc < 0 ? -ETIMEDOUT :
				mtk_nfc_update_ecc_stats(mtd, buf, sectors);
			mtk_nfc_read_fdm(chip, start, sectors);
		}
	}

	dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);

	if (raw)
		goto done;

	mtk_ecc_disable(nfc->ecc);

	if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
		mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
done:
	nfi_writel(nfc, 0, NFI_CON);

	return bitflips;
}

static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
				      struct nand_chip *chip, u32 off,
				      u32 len, u8 *p, int pg)
{
	return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
}

static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
				   struct nand_chip *chip, u8 *p,
				   int oob_on, int pg)
{
	return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
}

static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 u8 *buf, int oob_on, int page)
{
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc *nfc = nand_get_controller_data(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	int i, ret;

	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
	ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
				   page, 1);
	if (ret < 0)
		return ret;

	for (i = 0; i < chip->ecc.steps; i++) {
		memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);

		if (i == mtk_nand->bad_mark.sec)
			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);

		if (buf)
			memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
			       chip->ecc.size);
	}

	return ret;
}

static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
}

static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
{
	/*
	 * ACCON: access timing control register
	 * -------------------------------------
	 * 31:28: minimum required time for CS post pulling down after
	 *	accessing the device
	 * 27:22: minimum required time for CS pre pulling down before
	 *	accessing the device
	 * 21:16: minimum required time from NCEB low to NREB low
	 * 15:12: minimum required time from NWEB high to NREB low.
	 * 11:08: write enable hold time
	 * 07:04: write wait states
	 * 03:00: read wait states
	 */
	nfi_writel(nfc, 0x10804211, NFI_ACCCON);

	/*
	 * CNRNB: nand ready/busy register
	 * -------------------------------
	 * 7:4: timeout register for polling the NAND busy/ready signal
	 * 0  : poll the status of the busy/ready signal after [7:4]*16 cycles.
	 */
	nfi_writew(nfc, 0xf1, NFI_CNRNB);
	nfi_writel(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);

	mtk_nfc_hw_reset(nfc);

	nfi_readl(nfc, NFI_INTR_STA);
	nfi_writel(nfc, 0, NFI_INTR_EN);
}

static irqreturn_t mtk_nfc_irq(int irq, void *id)
{
	struct mtk_nfc *nfc = id;
	u16 sta, ien;

	sta = nfi_readw(nfc, NFI_INTR_STA);
	ien = nfi_readw(nfc, NFI_INTR_EN);

	if (!(sta & ien))
		return IRQ_NONE;

	nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
	complete(&nfc->done);

	return IRQ_HANDLED;
}

static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk->nfi_clk);
	if (ret) {
		dev_err(dev, "failed to enable nfi clk\n");
		return ret;
	}

	ret = clk_prepare_enable(clk->pad_clk);
	if (ret) {
		dev_err(dev, "failed to enable pad clk\n");
		clk_disable_unprepare(clk->nfi_clk);
		return ret;
	}

	return 0;
}

static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
{
	clk_disable_unprepare(clk->nfi_clk);
	clk_disable_unprepare(clk->pad_clk);
}

static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oob_region)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
	u32 eccsteps;

	eccsteps = mtd->writesize / chip->ecc.size;

	if (section >= eccsteps)
		return -ERANGE;

	oob_region->length = fdm->reg_size - fdm->ecc_size;
	oob_region->offset = section * fdm->reg_size + fdm->ecc_size;

	return 0;
}

static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oob_region)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
	u32 eccsteps;

	if (section)
		return -ERANGE;

	eccsteps = mtd->writesize / chip->ecc.size;
	oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
	oob_region->length = mtd->oobsize - oob_region->offset;

	return 0;
}

static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
	.free = mtk_nfc_ooblayout_free,
	.ecc = mtk_nfc_ooblayout_ecc,
};

static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
	u32 ecc_bytes;

	ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);

	fdm->reg_size = chip->spare_per_sector - ecc_bytes;
	if (fdm->reg_size > NFI_FDM_MAX_SIZE)
		fdm->reg_size = NFI_FDM_MAX_SIZE;

	/* bad block mark storage */
	fdm->ecc_size = 1;
}

static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
				     struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);

	if (mtd->writesize == 512) {
		bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
	} else {
		bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
		bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
		bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
	}
}

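/*
 * Pick the largest spare size supported by the controller that still fits in
 * the chip's OOB budget per (512- or 1024-byte) ECC sector.
 */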
static int mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(nand);
	const u8 *spare = nfc->caps->spare_size;
	u32 eccsteps, i, closest_spare = 0;

	eccsteps = mtd->writesize / nand->ecc.size;
	*sps = mtd->oobsize / eccsteps;

	if (nand->ecc.size == 1024)
		*sps >>= 1;

	if (*sps < MTK_NFC_MIN_SPARE)
		return -EINVAL;

	for (i = 0; i < nfc->caps->num_spare_size; i++) {
		if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
			closest_spare = i;
			if (*sps == spare[i])
				break;
		}
	}

	*sps = spare[closest_spare];

	if (nand->ecc.size == 1024)
		*sps <<= 1;

	return 0;
}

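/*
 * Derive the ECC step size and strength, either from the optional DT
 * properties or from the chip's datasheet values, then clamp the strength to
 * what the ECC engine and the per-sector spare area can accommodate.
 */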
static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mtk_nfc *nfc = nand_get_controller_data(nand);
	u32 spare;
	int free, ret;

	/* support only ecc hw mode */
	if (nand->ecc.mode != NAND_ECC_HW) {
		dev_err(dev, "ecc.mode not supported\n");
		return -EINVAL;
	}

	/* if the optional dt settings are not present */
	if (!nand->ecc.size || !nand->ecc.strength) {
		/* use datasheet requirements */
		nand->ecc.strength = nand->ecc_strength_ds;
		nand->ecc.size = nand->ecc_step_ds;

		/*
		 * align eccstrength and eccsize
		 * this controller only supports 512 and 1024 sizes
		 */
		if (nand->ecc.size < 1024) {
			if (mtd->writesize > 512) {
				nand->ecc.size = 1024;
				nand->ecc.strength <<= 1;
			} else {
				nand->ecc.size = 512;
			}
		} else {
			nand->ecc.size = 1024;
		}

		ret = mtk_nfc_set_spare_per_sector(&spare, mtd);
		if (ret)
			return ret;

		/* calculate oob bytes except ecc parity data */
		free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
		free = spare - free;

		/*
		 * increase the ecc strength if the leftover oob is larger
		 * than the max FDM size, or reduce it if the oob is too
		 * small to hold the ecc parity data.
		 */
		if (free > NFI_FDM_MAX_SIZE) {
			spare -= NFI_FDM_MAX_SIZE;
			nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
		} else if (free < 0) {
			spare -= NFI_FDM_MIN_SIZE;
			nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
		}
	}

	mtk_ecc_adjust_strength(nfc->ecc, &nand->ecc.strength);

	dev_info(dev, "eccsize %d eccstrength %d\n",
		 nand->ecc.size, nand->ecc.strength);

	return 0;
}

static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
				  struct device_node *np)
{
	struct mtk_nfc_nand_chip *chip;
	struct nand_chip *nand;
	struct mtd_info *mtd;
	int nsels, len;
	u32 tmp;
	int ret;
	int i;

	if (!of_get_property(np, "reg", &nsels))
		return -ENODEV;

	nsels /= sizeof(u32);
	if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
		dev_err(dev, "invalid reg property size %d\n", nsels);
		return -EINVAL;
	}

	chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
			    GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->nsels = nsels;
	for (i = 0; i < nsels; i++) {
		ret = of_property_read_u32_index(np, "reg", i, &tmp);
		if (ret) {
			dev_err(dev, "reg property failure : %d\n", ret);
			return ret;
		}
		chip->sels[i] = tmp;
	}

	nand = &chip->nand;
	nand->controller = &nfc->controller;

	nand_set_flash_node(nand, np);
	nand_set_controller_data(nand, nfc);

	nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
	nand->dev_ready = mtk_nfc_dev_ready;
	nand->select_chip = mtk_nfc_select_chip;
	nand->write_byte = mtk_nfc_write_byte;
	nand->write_buf = mtk_nfc_write_buf;
	nand->read_byte = mtk_nfc_read_byte;
	nand->read_buf = mtk_nfc_read_buf;
	nand->cmd_ctrl = mtk_nfc_cmd_ctrl;

	/* set default mode in case dt entry is missing */
	nand->ecc.mode = NAND_ECC_HW;

	nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
	nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
	nand->ecc.write_page = mtk_nfc_write_page_hwecc;
	nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
	nand->ecc.write_oob = mtk_nfc_write_oob_std;

	nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
	nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
	nand->ecc.read_page = mtk_nfc_read_page_hwecc;
	nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
	nand->ecc.read_oob = mtk_nfc_read_oob_std;

	mtd = nand_to_mtd(nand);
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = dev;
	mtd->name = MTK_NAME;
	mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);

	mtk_nfc_hw_init(nfc);

	ret = nand_scan_ident(mtd, nsels, NULL);
	if (ret)
		return ret;

	/* store bbt magic in page, because the OOB is not protected */
	if (nand->bbt_options & NAND_BBT_USE_FLASH)
		nand->bbt_options |= NAND_BBT_NO_OOB;

	ret = mtk_nfc_ecc_init(dev, mtd);
	if (ret)
		return -EINVAL;

	if (nand->options & NAND_BUSWIDTH_16) {
		dev_err(dev, "16bits buswidth not supported");
		return -EINVAL;
	}

	ret = mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
	if (ret)
		return ret;

	mtk_nfc_set_fdm(&chip->fdm, mtd);
	mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);

	len = mtd->writesize + mtd->oobsize;
	nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
	if (!nfc->buffer)
		return -ENOMEM;

	ret = nand_scan_tail(mtd);
	if (ret)
		return ret;

	ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
	if (ret) {
		dev_err(dev, "mtd parse partition error\n");
		nand_release(mtd);
		return ret;
	}

	list_add_tail(&chip->node, &nfc->chips);

	return 0;
}

static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
{
	struct device_node *np = dev->of_node;
	struct device_node *nand_np;
	int ret;

	for_each_child_of_node(np, nand_np) {
		ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
		if (ret) {
			of_node_put(nand_np);
			return ret;
		}
	}

	return 0;
}

static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
	.spare_size = spare_size_mt2701,
	.num_spare_size = 16,
	.pageformat_spare_shift = 4,
};

static const struct of_device_id mtk_nfc_id_table[] = {
	{
		.compatible = "mediatek,mt2701-nfc",
		.data = &mtk_nfc_caps_mt2701,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);

static int mtk_nfc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct mtk_nfc *nfc;
	struct resource *res;
	const struct of_device_id *of_nfc_id = NULL;
	int ret, irq;

	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	spin_lock_init(&nfc->controller.lock);
	init_waitqueue_head(&nfc->controller.wq);
	INIT_LIST_HEAD(&nfc->chips);

	/* probe defer if not ready */
	nfc->ecc = of_mtk_ecc_get(np);
	if (IS_ERR(nfc->ecc))
		return PTR_ERR(nfc->ecc);
	else if (!nfc->ecc)
		return -ENODEV;

	nfc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nfc->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(nfc->regs)) {
		ret = PTR_ERR(nfc->regs);
		goto release_ecc;
	}

	nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
	if (IS_ERR(nfc->clk.nfi_clk)) {
		dev_err(dev, "no clk\n");
		ret = PTR_ERR(nfc->clk.nfi_clk);
		goto release_ecc;
	}

	nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
	if (IS_ERR(nfc->clk.pad_clk)) {
		dev_err(dev, "no pad clk\n");
		ret = PTR_ERR(nfc->clk.pad_clk);
		goto release_ecc;
	}

	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
	if (ret)
		goto release_ecc;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no nfi irq resource\n");
		ret = -EINVAL;
		goto clk_disable;
	}

	ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
	if (ret) {
		dev_err(dev, "failed to request nfi irq\n");
		goto clk_disable;
	}

	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "failed to set dma mask\n");
		goto clk_disable;
	}

	of_nfc_id = of_match_device(mtk_nfc_id_table, &pdev->dev);
	if (!of_nfc_id) {
		ret = -ENODEV;
		goto clk_disable;
	}

	nfc->caps = of_nfc_id->data;

	platform_set_drvdata(pdev, nfc);

	ret = mtk_nfc_nand_chips_init(dev, nfc);
	if (ret) {
		dev_err(dev, "failed to init nand chips\n");
		goto clk_disable;
	}

	return 0;

clk_disable:
	mtk_nfc_disable_clk(&nfc->clk);

release_ecc:
	mtk_ecc_release(nfc->ecc);

	return ret;
}

static int mtk_nfc_remove(struct platform_device *pdev)
{
	struct mtk_nfc *nfc = platform_get_drvdata(pdev);
	struct mtk_nfc_nand_chip *chip;

	while (!list_empty(&nfc->chips)) {
		chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
					node);
		nand_release(nand_to_mtd(&chip->nand));
		list_del(&chip->node);
	}

	mtk_ecc_release(nfc->ecc);
	mtk_nfc_disable_clk(&nfc->clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_nfc_suspend(struct device *dev)
{
	struct mtk_nfc *nfc = dev_get_drvdata(dev);

	mtk_nfc_disable_clk(&nfc->clk);

	return 0;
}

static int mtk_nfc_resume(struct device *dev)
{
	struct mtk_nfc *nfc = dev_get_drvdata(dev);
	struct mtk_nfc_nand_chip *chip;
	struct nand_chip *nand;
	struct mtd_info *mtd;
	int ret;
	u32 i;

	udelay(200);

	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
	if (ret)
		return ret;

	mtk_nfc_hw_init(nfc);

	/* reset NAND chip if VCC was powered off */
	list_for_each_entry(chip, &nfc->chips, node) {
		nand = &chip->nand;
		mtd = nand_to_mtd(nand);
		for (i = 0; i < chip->nsels; i++) {
			nand->select_chip(mtd, i);
			nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
		}
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
#endif

static struct platform_driver mtk_nfc_driver = {
	.probe  = mtk_nfc_probe,
	.remove = mtk_nfc_remove,
	.driver = {
		.name  = MTK_NAME,
		.of_match_table = mtk_nfc_id_table,
#ifdef CONFIG_PM_SLEEP
		.pm = &mtk_nfc_pm_ops,
#endif
	},
};

module_platform_driver(mtk_nfc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");