// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell NAND flash controller driver
 *
 * Copyright (C) 2017 Marvell
 * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
 *
 */

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/mtd/rawnand.h>
#include <linux/of_platform.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>

/* Data FIFO granularity; FIFO reads/writes must be a multiple of this length */
#define FIFO_DEPTH 8
#define FIFO_REP(x) (x / sizeof(u32))
#define BCH_SEQ_READS (32 / FIFO_DEPTH)
/* The NFC does not support transferring chunks larger than this at a time */
#define MAX_CHUNK_SIZE 2112
/* NFCv1 cannot read more than 7 bytes of ID */
#define NFCV1_READID_LEN 7
/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
#define POLL_PERIOD 0
#define POLL_TIMEOUT 100000
/* Interrupt maximum wait period in ms */
#define IRQ_TIMEOUT 1000
/* Latency in clock cycles between SoC pins and NFC logic */
#define MIN_RD_DEL_CNT 3
/* Maximum number of contiguous address cycles */
#define MAX_ADDRESS_CYC_NFCV1 5
#define MAX_ADDRESS_CYC_NFCV2 7
/* System control registers/bits to enable the NAND controller on some SoCs */
#define GENCONF_SOC_DEVICE_MUX 0x208
#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
#define GENCONF_CLK_GATING_CTRL 0x220
#define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
#define GENCONF_ND_CLK_CTRL 0x700
#define GENCONF_ND_CLK_CTRL_EN BIT(0)

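/*
 * Illustrative sketch (not part of the driver): on SoCs that need it, these
 * GENCONF bits are typically set through a syscon regmap at probe time. The
 * phandle property name below is an assumption for the example.
 *
 *	struct regmap *sysctrl;
 *	u32 reg;
 *
 *	sysctrl = syscon_regmap_lookup_by_phandle(np,
 *						  "marvell,system-controller");
 *	regmap_read(sysctrl, GENCONF_SOC_DEVICE_MUX, &reg);
 *	reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
 *	regmap_write(sysctrl, GENCONF_SOC_DEVICE_MUX, reg);
 */
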
/* NAND controller data flash control register */
#define NDCR 0x00
#define NDCR_ALL_INT GENMASK(11, 0)
#define NDCR_CS1_CMDDM BIT(7)
#define NDCR_CS0_CMDDM BIT(8)
#define NDCR_RDYM BIT(11)
#define NDCR_ND_ARB_EN BIT(12)
#define NDCR_RA_START BIT(15)
#define NDCR_RD_ID_CNT(x) (min_t(unsigned int, x, 0x7) << 16)
#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0)
#define NDCR_DWIDTH_M BIT(26)
#define NDCR_DWIDTH_C BIT(27)
#define NDCR_ND_RUN BIT(28)
#define NDCR_DMA_EN BIT(29)
#define NDCR_ECC_EN BIT(30)
#define NDCR_SPARE_EN BIT(31)
#define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
				    NDCR_DWIDTH_M | NDCR_DWIDTH_C))

/* NAND interface timing parameter 0 register */
#define NDTR0 0x04
#define NDTR0_TRP(x) ((min_t(unsigned int, x, 0xF) & 0x7) << 0)
#define NDTR0_TRH(x) (min_t(unsigned int, x, 0x7) << 3)
#define NDTR0_ETRP(x) ((min_t(unsigned int, x, 0xF) & 0x8) << 3)
#define NDTR0_SEL_NRE_EDGE BIT(7)
#define NDTR0_TWP(x) (min_t(unsigned int, x, 0x7) << 8)
#define NDTR0_TWH(x) (min_t(unsigned int, x, 0x7) << 11)
#define NDTR0_TCS(x) (min_t(unsigned int, x, 0x7) << 16)
#define NDTR0_TCH(x) (min_t(unsigned int, x, 0x7) << 19)
#define NDTR0_RD_CNT_DEL(x) (min_t(unsigned int, x, 0xF) << 22)
#define NDTR0_SELCNTR BIT(26)
#define NDTR0_TADL(x) (min_t(unsigned int, x, 0x1F) << 27)

/* NAND interface timing parameter 1 register */
#define NDTR1 0x0C
#define NDTR1_TAR(x) (min_t(unsigned int, x, 0xF) << 0)
#define NDTR1_TWHR(x) (min_t(unsigned int, x, 0xF) << 4)
#define NDTR1_TRHW(x) (min_t(unsigned int, x / 16, 0x3) << 8)
#define NDTR1_PRESCALE BIT(14)
#define NDTR1_WAIT_MODE BIT(15)
#define NDTR1_TR(x) (min_t(unsigned int, x, 0xFFFF) << 16)

/* NAND controller status register */
#define NDSR 0x14
#define NDSR_WRCMDREQ BIT(0)
#define NDSR_RDDREQ BIT(1)
#define NDSR_WRDREQ BIT(2)
#define NDSR_CORERR BIT(3)
#define NDSR_UNCERR BIT(4)
#define NDSR_CMDD(cs) BIT(8 - cs)
#define NDSR_RDY(rb) BIT(11 + rb)
#define NDSR_ERRCNT(x) ((x >> 16) & 0x1F)

/* NAND ECC control register */
#define NDECCCTRL 0x28
#define NDECCCTRL_BCH_EN BIT(0)

/* NAND controller data buffer register */
#define NDDB 0x40

/* NAND controller command buffer 0 register */
#define NDCB0 0x48
#define NDCB0_CMD1(x) ((x & 0xFF) << 0)
#define NDCB0_CMD2(x) ((x & 0xFF) << 8)
#define NDCB0_ADDR_CYC(x) ((x & 0x7) << 16)
#define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
#define NDCB0_DBC BIT(19)
#define NDCB0_CMD_TYPE(x) ((x & 0x7) << 21)
#define NDCB0_CSEL BIT(24)
#define NDCB0_RDY_BYP BIT(27)
#define NDCB0_LEN_OVRD BIT(28)
#define NDCB0_CMD_XTYPE(x) ((x & 0x7) << 29)

/* NAND controller command buffer 1 register */
#define NDCB1 0x4C
#define NDCB1_COLS(x) ((x & 0xFFFF) << 0)
#define NDCB1_ADDRS_PAGE(x) (x << 16)

/* NAND controller command buffer 2 register */
#define NDCB2 0x50
#define NDCB2_ADDR5_PAGE(x) (((x >> 16) & 0xFF) << 0)
#define NDCB2_ADDR5_CYC(x) ((x & 0xFF) << 0)

/* NAND controller command buffer 3 register */
#define NDCB3 0x54
#define NDCB3_ADDR6_CYC(x) ((x & 0xFF) << 16)
#define NDCB3_ADDR7_CYC(x) ((x & 0xFF) << 24)

/* NAND controller command buffer 0 register 'type' and 'xtype' fields */
#define TYPE_READ 0
#define TYPE_WRITE 1
#define TYPE_ERASE 2
#define TYPE_READ_ID 3
#define TYPE_STATUS 4
#define TYPE_RESET 5
#define TYPE_NAKED_CMD 6
#define TYPE_NAKED_ADDR 7
#define TYPE_MASK 7
#define XTYPE_MONOLITHIC_RW 0
#define XTYPE_LAST_NAKED_RW 1
#define XTYPE_FINAL_COMMAND 3
#define XTYPE_READ 4
#define XTYPE_WRITE_DISPATCH 4
#define XTYPE_NAKED_RW 5
#define XTYPE_COMMAND_DISPATCH 6
#define XTYPE_MASK 7

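/*
 * Illustrative sketch (not part of the driver): 'type' and 'xtype' combine in
 * NDCB0 to describe one operation. A monolithic page read (command, address
 * and data in a single step) would use:
 *
 *	ndcb0 = NDCB0_CMD_TYPE(TYPE_READ) |
 *		NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
 *
 * while the data-only step closing a chunked access pairs TYPE_READ or
 * TYPE_WRITE with XTYPE_LAST_NAKED_RW, as the helpers below do.
 */
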
/**
 * The Marvell ECC engine works differently from the others: in order to limit
 * the size of the IP, hardware engineers chose to set a fixed strength of 16
 * bits per subpage. Depending on the strength needed by the NAND chip, a
 * particular layout mixing data/spare/ECC is defined, with a possible last
 * chunk smaller than the others.
 *
 * @writesize: Full page size on which the layout applies
 * @chunk: Desired ECC chunk size on which the layout applies
 * @strength: Desired ECC strength (per chunk size bytes) on which the
 *            layout applies
 * @nchunks: Total number of chunks
 * @full_chunk_cnt: Number of full-sized chunks, which is the number of
 *                  repetitions of the pattern:
 *                  (data_bytes + spare_bytes + ecc_bytes).
 * @data_bytes: Number of data bytes per chunk
 * @spare_bytes: Number of spare bytes per chunk
 * @ecc_bytes: Number of ecc bytes per chunk
 * @last_data_bytes: Number of data bytes in the last chunk
 * @last_spare_bytes: Number of spare bytes in the last chunk
 * @last_ecc_bytes: Number of ecc bytes in the last chunk
 */
struct marvell_hw_ecc_layout {
	/* Constraints */
	int writesize;
	int chunk;
	int strength;
	/* Corresponding layout */
	int nchunks;
	int full_chunk_cnt;
	int data_bytes;
	int spare_bytes;
	int ecc_bytes;
	int last_data_bytes;
	int last_spare_bytes;
	int last_ecc_bytes;
};

#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb)	\
	{								\
		.writesize = ws,					\
		.chunk = dc,						\
		.strength = ds,						\
		.nchunks = nc,						\
		.full_chunk_cnt = fcc,					\
		.data_bytes = db,					\
		.spare_bytes = sb,					\
		.ecc_bytes = eb,					\
		.last_data_bytes = ldb,					\
		.last_spare_bytes = lsb,				\
		.last_ecc_bytes = leb,					\
	}

/* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
	MARVELL_LAYOUT(  512,  512,  1, 1, 1,  512,  8,  8,  0,  0,  0),
	MARVELL_LAYOUT( 2048,  512,  1, 1, 1, 2048, 40, 24,  0,  0,  0),
	MARVELL_LAYOUT( 2048,  512,  4, 1, 1, 2048, 32, 30,  0,  0,  0),
	MARVELL_LAYOUT( 4096,  512,  4, 2, 2, 2048, 32, 30,  0,  0,  0),
	MARVELL_LAYOUT( 4096,  512,  8, 5, 4, 1024,  0, 30,  0, 64, 30),
};

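/*
 * Worked example (illustrative only): the last entry above targets 4096-byte
 * pages with a strength of 8 bits per 512 bytes. The page is split into
 * 5 chunks: 4 full chunks of 1024 data + 0 spare + 30 ECC bytes, plus a last
 * chunk of 0 data + 64 spare + 30 ECC bytes. Data thus adds up to
 * 4 * 1024 = 4096 bytes, while the OOB area holds 64 spare bytes and
 * 5 * 30 = 150 ECC bytes.
 */
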
/**
 * The NAND flash controller has up to 4 CE and 2 RB pins. The CE selection
 * is made by a field in the NDCB0 register, and by another field in the NDCB2
 * register. The datasheet describes the logic with an error: the ADDR5 field
 * is declared once at the beginning of NDCB2, and another time at its end.
 * Because the ADDR5 field of NDCB2 may be used by other bytes, it would be
 * more logical to use the last bits of this field instead of the first ones.
 *
 * @cs: Wanted CE lane.
 * @ndcb0_csel: Value of the NDCB0 register with or without the flag
 *              selecting the wanted CE lane. This is set once when
 *              the Device Tree is probed.
 * @rb: Ready/Busy pin for the flash chip
 */
struct marvell_nand_chip_sel {
	unsigned int cs;
	u32 ndcb0_csel;
	unsigned int rb;
};

/**
 * NAND chip structure: stores NAND chip device related information
 *
 * @chip: Base NAND chip structure
 * @node: Used to store NAND chips into a list
 * @layout: NAND layout when using hardware ECC
 * @ndcr: Controller register value for this NAND chip
 * @ndtr0: Timing registers 0 value for this NAND chip
 * @ndtr1: Timing registers 1 value for this NAND chip
 * @addr_cyc: Number of address cycles required by the NAND chip
 * @selected_die: Current active CS
 * @nsels: Number of CS lines required by the NAND chip
 * @sels: Array of CS lines descriptions
 */
struct marvell_nand_chip {
	struct nand_chip chip;
	struct list_head node;
	const struct marvell_hw_ecc_layout *layout;
	u32 ndcr;
	u32 ndtr0;
	u32 ndtr1;
	int addr_cyc;
	int selected_die;
	unsigned int nsels;
	struct marvell_nand_chip_sel sels[0];
};

static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
{
	return container_of(chip, struct marvell_nand_chip, chip);
}

static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
							*nand)
{
	return &nand->sels[nand->selected_die];
}

/**
 * NAND controller capabilities for distinction between compatible strings
 *
 * @max_cs_nb: Number of Chip Select lines available
 * @max_rb_nb: Number of Ready/Busy lines available
 * @need_system_controller: Indicates if the SoC needs to have access to the
 *                          system controller (i.e. to enable the NAND
 *                          controller)
 * @legacy_of_bindings: Indicates if DT parsing must be done using the
 *                      old (legacy) bindings
 * @is_nfcv2: NFCv2 has numerous enhancements compared to NFCv1, e.g.
 *            the BCH error detection and correction algorithm and the
 *            NDCB3 register
 * @use_dma: Use DMA for data transfers
 */
struct marvell_nfc_caps {
	unsigned int max_cs_nb;
	unsigned int max_rb_nb;
	bool need_system_controller;
	bool legacy_of_bindings;
	bool is_nfcv2;
	bool use_dma;
};

/**
 * NAND controller structure: stores Marvell NAND controller information
 *
 * @controller: Base controller structure
 * @dev: Parent device (used to print error messages)
 * @regs: NAND controller registers
 * @ecc_clk: ECC block clock, two times the NAND controller clock
 * @complete: Completion object to wait for NAND controller events
 * @assigned_cs: Bitmask describing already assigned CS lines
 * @chips: List containing all the NAND chips attached to
 *         this NAND controller
 * @selected_chip: Currently selected target chip
 * @caps: NAND controller capabilities for each compatible string
 * @use_dma: Whether to use DMA for data transfers
 * @dma_chan: DMA channel (NFCv1 only)
 * @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
 */
struct marvell_nfc {
	struct nand_hw_control controller;
	struct device *dev;
	void __iomem *regs;
	struct clk *ecc_clk;
	struct completion complete;
	unsigned long assigned_cs;
	struct list_head chips;
	struct nand_chip *selected_chip;
	const struct marvell_nfc_caps *caps;

	/* DMA (NFCv1 only) */
	bool use_dma;
	struct dma_chan *dma_chan;
	u8 *dma_buf;
};

static inline struct marvell_nfc *to_marvell_nfc(struct nand_hw_control *ctrl)
{
	return container_of(ctrl, struct marvell_nfc, controller);
}

/**
 * NAND controller timings expressed in NAND controller clock cycles
 *
 * @tRP: ND_nRE pulse width
 * @tRH: ND_nRE high duration
 * @tWP: ND_nWE pulse time
 * @tWH: ND_nWE high duration
 * @tCS: Enable signal setup time
 * @tCH: Enable signal hold time
 * @tADL: Address to write data delay
 * @tAR: ND_ALE low to ND_nRE low delay
 * @tWHR: ND_nWE high to ND_nRE low for status read
 * @tRHW: ND_nRE high duration, read to write delay
 * @tR: ND_nWE high to ND_nRE low for read
 */
struct marvell_nfc_timings {
	/* NDTR0 fields */
	unsigned int tRP;
	unsigned int tRH;
	unsigned int tWP;
	unsigned int tWH;
	unsigned int tCS;
	unsigned int tCH;
	unsigned int tADL;
	/* NDTR1 fields */
	unsigned int tAR;
	unsigned int tWHR;
	unsigned int tRHW;
	unsigned int tR;
};

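/*
 * Illustrative sketch (not part of the driver): once filled, these fields map
 * directly onto the NDTR0/NDTR1 layouts defined above, e.g. for NDTR0:
 *
 *	ndtr0 = NDTR0_TRP(t->tRP) | NDTR0_TRH(t->tRH) |
 *		NDTR0_TWP(t->tWP) | NDTR0_TWH(t->tWH) |
 *		NDTR0_TCS(t->tCS) | NDTR0_TCH(t->tCH) |
 *		NDTR0_TADL(t->tADL);
 */
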
/**
 * Derives a duration in number of clock cycles.
 *
 * @ps: Duration in picoseconds
 * @period_ns: Clock period in nanoseconds
 *
 * Convert the duration to nanoseconds, then divide by the period and
 * return the number of clock periods.
 */
#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
#define TO_CYCLES64(ps, period_ns) (DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
						     period_ns))

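/*
 * Worked example (illustrative only): with a 100 MHz controller clock
 * (period_ns = 10) and a 70 ns (i.e. 70000 ps) tADL requirement,
 * TO_CYCLES(70000, 10) = DIV_ROUND_UP(70, 10) = 7 clock cycles.
 */
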
/**
 * NAND driver structure filled during the parsing of the ->exec_op() subop
 * subset of instructions.
 *
 * @ndcb: Array of values written to the NDCBx registers
 * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle
 * @rdy_timeout_ms: Timeout for waits on the Ready/Busy pin
 * @rdy_delay_ns: Optional delay after waiting for the RB pin
 * @data_delay_ns: Optional delay after the data xfer
 * @data_instr_idx: Index of the data instruction in the subop
 * @data_instr: Pointer to the data instruction in the subop
 */
struct marvell_nfc_op {
	u32 ndcb[4];
	unsigned int cle_ale_delay_ns;
	unsigned int rdy_timeout_ms;
	unsigned int rdy_delay_ns;
	unsigned int data_delay_ns;
	unsigned int data_instr_idx;
	const struct nand_op_instr *data_instr;
};

/*
 * Internal helper to conditionally apply a delay (from the above structure,
 * most of the time).
 */
static void cond_delay(unsigned int ns)
{
	if (!ns)
		return;

	if (ns < 10000)
		ndelay(ns);
	else
		udelay(DIV_ROUND_UP(ns, 1000));
}

/*
 * The controller has many flags that could generate interrupts; most of them
 * are disabled and polling is used. For the very slow signals, using
 * interrupts may reduce the CPU load.
 */
static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
{
	u32 reg;

	/* Writing 1 disables the interrupt */
	reg = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(reg | int_mask, nfc->regs + NDCR);
}

static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
{
	u32 reg;

	/* Writing 0 enables the interrupt */
	reg = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
}

static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
{
	writel_relaxed(int_mask, nfc->regs + NDSR);
}

static void marvell_nfc_force_byte_access(struct nand_chip *chip,
					  bool force_8bit)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr;

	/*
	 * Callers of this function do not verify if the NAND is using a 16-bit
	 * or an 8-bit bus for normal operations, so we need to take care of
	 * that here by leaving the configuration unchanged if the NAND does
	 * not have the NAND_BUSWIDTH_16 flag set.
	 */
	if (!(chip->options & NAND_BUSWIDTH_16))
		return;

	ndcr = readl_relaxed(nfc->regs + NDCR);

	if (force_8bit)
		ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
	else
		ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;

	writel_relaxed(ndcr, nfc->regs + NDCR);
}

static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 val;
	int ret;

	/*
	 * The command is being processed; wait for the ND_RUN bit to be
	 * cleared by the NFC. If it is not, clear it by hand.
	 */
	ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
					 (val & NDCR_ND_RUN) == 0,
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
		writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
			       nfc->regs + NDCR);
		return ret;
	}

	return 0;
}

/*
 * Any time a command has to be sent to the controller, the following sequence
 * has to be followed:
 * - call marvell_nfc_prepare_cmd()
 *      -> activate the ND_RUN bit that will kind of 'start a job'
 *      -> wait for the signal indicating the NFC is waiting for a command
 * - send the command (cmd and address cycles)
 * - optionally send or receive the data
 * - call marvell_nfc_end_cmd() with the corresponding flag
 *      -> wait for the flag to be triggered or cancel the job with a timeout
 *
 * The following helpers are here to factorize the code a bit so that
 * specialized functions responsible for executing the actual NAND
 * operations do not have to replicate the same code blocks.
 */
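/*
 * Minimal usage sketch of these helpers (illustrative only, error handling
 * elided; the real callers are the functions further below):
 *
 *	ret = marvell_nfc_prepare_cmd(chip);
 *	if (ret)
 *		return ret;
 *
 *	marvell_nfc_send_cmd(chip, &nfc_op);
 *	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
 *				  "RDDREQ while draining FIFO");
 *	(transfer the data, then wait for command completion)
 *	ret = marvell_nfc_wait_cmdd(chip);
 */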
static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr, val;
	int ret;

	/* Poll ND_RUN and clear NDSR before issuing any command */
	ret = marvell_nfc_wait_ndrun(chip);
	if (ret) {
		dev_err(nfc->dev, "Last operation did not succeed\n");
		return ret;
	}

	ndcr = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);

	/* Assert the ND_RUN bit and wait for the NFC to be ready */
	writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
	ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
					 val & NDSR_WRCMDREQ,
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		dev_err(nfc->dev, "Timeout on WRCMDREQ\n");
		return -ETIMEDOUT;
	}

	/* Command may be written, clear WRCMDREQ status bit */
	writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);

	return 0;
}

static void marvell_nfc_send_cmd(struct nand_chip *chip,
				 struct marvell_nfc_op *nfc_op)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);

	dev_dbg(nfc->dev, "\nNDCR: 0x%08x\n"
		"NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
		(u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
		nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);

	writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
		       nfc->regs + NDCB0);
	writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
	writel(nfc_op->ndcb[2], nfc->regs + NDCB0);

	/*
	 * Write NDCB0 a fourth time only if LEN_OVRD is set or if the ADDR6 or
	 * ADDR7 fields are used (only available on NFCv2).
	 */
	if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
	    NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
		if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
			writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
	}
}

static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
			       const char *label)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 val;
	int ret;

	ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
					 val & flag,
					 POLL_PERIOD, POLL_TIMEOUT);

	if (ret) {
		dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
			label, val);
		if (nfc->dma_chan)
			dmaengine_terminate_all(nfc->dma_chan);
		return ret;
	}

	/*
	 * The DMA function uses this helper to poll on CMDD bits without
	 * wanting them to be cleared.
	 */
	if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
		return 0;

	writel_relaxed(flag, nfc->regs + NDSR);

	return 0;
}

static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);

	return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
}

static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	int ret;

	/* Timeout is expressed in ms */
	if (!timeout_ms)
		timeout_ms = IRQ_TIMEOUT;

	init_completion(&nfc->complete);

	marvell_nfc_enable_int(nfc, NDCR_RDYM);
	ret = wait_for_completion_timeout(&nfc->complete,
					  msecs_to_jiffies(timeout_ms));
	marvell_nfc_disable_int(nfc, NDCR_RDYM);
	marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
	if (!ret) {
		dev_err(nfc->dev, "Timeout waiting for RB signal\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void marvell_nfc_select_chip(struct mtd_info *mtd, int die_nr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr_generic;

	if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
		return;

	if (die_nr < 0 || die_nr >= marvell_nand->nsels) {
		nfc->selected_chip = NULL;
		marvell_nand->selected_die = -1;
		return;
	}

	/*
	 * Do not change the timing registers when using the DT property
	 * marvell,nand-keep-config; in that case ->ndtr0 and ->ndtr1 from the
	 * marvell_nand structure are supposedly empty.
	 */
	writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
	writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);

	/*
	 * Reset the NDCR register to a clean state for this particular chip,
	 * also clear ND_RUN bit.
	 */
	ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
		       NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
	writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);

	/* Also reset the interrupt status register */
	marvell_nfc_clear_int(nfc, NDCR_ALL_INT);

	nfc->selected_chip = chip;
	marvell_nand->selected_die = die_nr;
}

static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
{
	struct marvell_nfc *nfc = dev_id;
	u32 st = readl_relaxed(nfc->regs + NDSR);
	u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;

	/*
	 * The RDY interrupt mask is one bit in NDCR while there are two status
	 * bits in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
	 */
	if (st & NDSR_RDY(1))
		st |= NDSR_RDY(0);

	if (!(st & ien))
		return IRQ_NONE;

	marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);

	if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ)))
		complete(&nfc->complete);

	return IRQ_HANDLED;
}

/* HW ECC related functions */
static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr = readl_relaxed(nfc->regs + NDCR);

	if (!(ndcr & NDCR_ECC_EN)) {
		writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);

		/*
		 * When enabling BCH, set threshold to 0 to always know the
		 * number of corrected bitflips.
		 */
		if (chip->ecc.algo == NAND_ECC_BCH)
			writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
	}
}

static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr = readl_relaxed(nfc->regs + NDCR);

	if (ndcr & NDCR_ECC_EN) {
		writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
		if (chip->ecc.algo == NAND_ECC_BCH)
			writel_relaxed(0, nfc->regs + NDECCCTRL);
	}
}

/* DMA related helpers */
static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
{
	u32 reg;

	reg = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
}

static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
{
	u32 reg;

	reg = readl_relaxed(nfc->regs + NDCR);
	writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
}

/* Read/write PIO/DMA accessors */
static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
				     enum dma_data_direction direction,
				     unsigned int len)
{
	unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;
	dma_cookie_t cookie;
	int ret;

	marvell_nfc_enable_dma(nfc);
	/* Prepare the DMA transfer */
	sg_init_one(&sg, nfc->dma_buf, dma_len);
	dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
	tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
				     direction == DMA_FROM_DEVICE ?
				     DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
		return -ENXIO;
	}

	/* Do the task and wait for it to finish */
	cookie = dmaengine_submit(tx);
	ret = dma_submit_error(cookie);
	if (ret)
		return -EIO;

	dma_async_issue_pending(nfc->dma_chan);
	ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
	dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
	marvell_nfc_disable_dma(nfc);
	if (ret) {
		dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
			dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
		dmaengine_terminate_all(nfc->dma_chan);
		return -ETIMEDOUT;
	}

	return 0;
}

static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
					unsigned int len)
{
	unsigned int last_len = len % FIFO_DEPTH;
	unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
	int i;

	for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
		ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));

	if (last_len) {
		u8 tmp_buf[FIFO_DEPTH];

		ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
		memcpy(in + last_full_offset, tmp_buf, last_len);
	}

	return 0;
}

static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
					 unsigned int len)
{
	unsigned int last_len = len % FIFO_DEPTH;
	unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
	int i;

	for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
		iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));

	if (last_len) {
		u8 tmp_buf[FIFO_DEPTH];

		memcpy(tmp_buf, out + last_full_offset, last_len);
		iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
	}

	return 0;
}

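/*
 * Worked example (illustrative only): a 2052-byte PIO transfer is handled by
 * the helpers above as 256 full FIFO accesses of FIFO_DEPTH (8) bytes, each
 * made of FIFO_REP(8) = 2 32-bit accesses, followed by one last 8-byte FIFO
 * access of which only 2052 % 8 = 4 bytes are copied from/to the caller's
 * buffer through tmp_buf.
 */
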
static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
					  u8 *data, int data_len,
					  u8 *spare, int spare_len,
					  u8 *ecc, int ecc_len,
					  unsigned int *max_bitflips)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int bf;

	/*
	 * Blank pages (all 0xFF) that have not been written may be recognized
	 * as bad if bitflips occur, so whenever an uncorrectable error occurs,
	 * check if the entire page (with ECC bytes) is actually blank or not.
	 */
	if (!data)
		data_len = 0;
	if (!spare)
		spare_len = 0;
	if (!ecc)
		ecc_len = 0;

	bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
					 spare, spare_len, chip->ecc.strength);
	if (bf < 0) {
		mtd->ecc_stats.failed++;
		return;
	}

	/* Update the stats and max_bitflips */
	mtd->ecc_stats.corrected += bf;
	*max_bitflips = max_t(unsigned int, *max_bitflips, bf);
}

/*
 * Check whether a chunk is correct or not according to the hardware ECC
 * engine. mtd->ecc_stats.corrected is updated, as well as max_bitflips;
 * however, mtd->ecc_stats.failed is not: the function will instead return a
 * non-zero value indicating that a check on the emptiness of the subpage
 * must be performed before declaring the subpage corrupted.
 */
static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip,
				      unsigned int *max_bitflips)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	int bf = 0;
	u32 ndsr;

	ndsr = readl_relaxed(nfc->regs + NDSR);

	/* Check uncorrectable error flag */
	if (ndsr & NDSR_UNCERR) {
		writel_relaxed(ndsr, nfc->regs + NDSR);

		/*
		 * Do not increment ->ecc_stats.failed now; instead, return a
		 * non-zero value to indicate that this chunk was apparently
		 * bad, and it should be checked to see if it is empty or not.
		 * If the chunk (with ECC bytes) is not declared empty, the
		 * calling function must increment the failure count.
		 */
		return -EBADMSG;
	}

	/* Check correctable error flag */
	if (ndsr & NDSR_CORERR) {
		writel_relaxed(ndsr, nfc->regs + NDSR);

		if (chip->ecc.algo == NAND_ECC_BCH)
			bf = NDSR_ERRCNT(ndsr);
		else
			bf = 1;
	}

	/* Update the stats and max_bitflips */
	mtd->ecc_stats.corrected += bf;
	*max_bitflips = max_t(unsigned int, *max_bitflips, bf);

	return 0;
}

/* Hamming read helpers */
static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
					       u8 *data_buf, u8 *oob_buf,
					       bool raw, int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_DBC |
			   NDCB0_CMD1(NAND_CMD_READ0) |
			   NDCB0_CMD2(NAND_CMD_READSTART),
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
	};
	unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
	int ret;

	/* NFCv2 needs more information about the operation being executed */
	if (nfc->caps->is_nfcv2)
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				  "RDDREQ while draining FIFO (data/oob)");
	if (ret)
		return ret;

	/*
	 * Read the page then the OOB area. Unlike what is shown in current
	 * documentation, spare bytes are protected by the ECC engine, and must
	 * be at the beginning of the OOB area or running this driver on legacy
	 * systems will prevent the discovery of the BBM/BBT.
	 */
	if (nfc->use_dma) {
		marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
					  lt->data_bytes + oob_bytes);
		memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
		memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
	} else {
		marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
		marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
	}

	ret = marvell_nfc_wait_cmdd(chip);

	return ret;
}

static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct mtd_info *mtd,
						struct nand_chip *chip, u8 *buf,
						int oob_required, int page)
{
	return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
						   true, page);
}

static int marvell_nfc_hw_ecc_hmg_read_page(struct mtd_info *mtd,
					    struct nand_chip *chip,
					    u8 *buf, int oob_required,
					    int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
	int max_bitflips = 0, ret;
	u8 *raw_buf;

	marvell_nfc_enable_hw_ecc(chip);
	marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
					    page);
	ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
	marvell_nfc_disable_hw_ecc(chip);

	if (!ret)
		return max_bitflips;

	/*
	 * When ECC failures are detected, check if the full page has been
	 * written or not. Ignore the failure if it is actually empty.
	 */
	raw_buf = kmalloc(full_sz, GFP_KERNEL);
	if (!raw_buf)
		return -ENOMEM;

	marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
					    lt->data_bytes, true, page);
	marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
				      &max_bitflips);
	kfree(raw_buf);

	return max_bitflips;
}

/*
 * The spare area in Hamming layouts is not protected by the ECC engine (even
 * if it appears before the ECC bytes when reading), so the ->read_oob_raw()
 * function also stands for ->read_oob().
 */
static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct mtd_info *mtd,
					       struct nand_chip *chip, int page)
{
	/* Invalidate page cache */
	chip->pagebuf = -1;

	return marvell_nfc_hw_ecc_hmg_do_read_page(chip, chip->data_buf,
						   chip->oob_poi, true, page);
}

/* Hamming write helpers */
static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
						const u8 *data_buf,
						const u8 *oob_buf, bool raw,
						int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_CMD1(NAND_CMD_SEQIN) |
			   NDCB0_CMD2(NAND_CMD_PAGEPROG) |
			   NDCB0_DBC,
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
	};
	unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
	int ret;

	/* NFCv2 needs more information about the operation being executed */
	if (nfc->caps->is_nfcv2)
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
				  "WRDREQ while loading FIFO (data)");
	if (ret)
		return ret;

	/* Write the page then the OOB area */
	if (nfc->use_dma) {
		memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
		memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
		marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
					  lt->ecc_bytes + lt->spare_bytes);
	} else {
		marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
		marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
	}

	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	ret = marvell_nfc_wait_op(chip,
				  chip->data_interface.timings.sdr.tPROG_max);
	return ret;
}

static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct mtd_info *mtd,
						 struct nand_chip *chip,
						 const u8 *buf,
						 int oob_required, int page)
{
	return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
						    true, page);
}

static int marvell_nfc_hw_ecc_hmg_write_page(struct mtd_info *mtd,
					     struct nand_chip *chip,
					     const u8 *buf,
					     int oob_required, int page)
{
	int ret;

	marvell_nfc_enable_hw_ecc(chip);
	ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
						   false, page);
	marvell_nfc_disable_hw_ecc(chip);

	return ret;
}

/*
 * The spare area in Hamming layouts is not protected by the ECC engine (even
 * if it appears before the ECC bytes when reading), so the ->write_oob_raw()
 * function also stands for ->write_oob().
 */
static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct mtd_info *mtd,
						struct nand_chip *chip,
						int page)
{
	/* Invalidate page cache */
	chip->pagebuf = -1;

	memset(chip->data_buf, 0xFF, mtd->writesize);

	return marvell_nfc_hw_ecc_hmg_do_write_page(chip, chip->data_buf,
						    chip->oob_poi, true, page);
}

/* BCH read helpers */
static int marvell_nfc_hw_ecc_bch_read_page_raw(struct mtd_info *mtd,
						struct nand_chip *chip, u8 *buf,
						int oob_required, int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	u8 *oob = chip->oob_poi;
	int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
	int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
			 lt->last_spare_bytes;
	int data_len = lt->data_bytes;
	int spare_len = lt->spare_bytes;
	int ecc_len = lt->ecc_bytes;
	int chunk;

	if (oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	nand_read_page_op(chip, page, 0, NULL, 0);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		/* Update last chunk length */
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
			ecc_len = lt->last_ecc_bytes;
		}

		/* Read data bytes */
		nand_change_read_column_op(chip, chunk * chunk_size,
					   buf + (lt->data_bytes * chunk),
					   data_len, false);

		/* Read spare bytes */
		nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
				  spare_len, false);

		/* Read ECC bytes */
		nand_read_data_op(chip, oob + ecc_offset +
				  (ALIGN(lt->ecc_bytes, 32) * chunk),
				  ecc_len, false);
	}

	return 0;
}

static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
					      u8 *data, unsigned int data_len,
					      u8 *spare, unsigned int spare_len,
					      int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int i, ret;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_LEN_OVRD,
		.ndcb[1] = NDCB1_ADDRS_PAGE(page),
		.ndcb[2] = NDCB2_ADDR5_PAGE(page),
		.ndcb[3] = data_len + spare_len,
	};

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return;

	if (chunk == 0)
		nfc_op.ndcb[0] |= NDCB0_DBC |
				  NDCB0_CMD1(NAND_CMD_READ0) |
				  NDCB0_CMD2(NAND_CMD_READSTART);

	/*
	 * Trigger the naked read operation only on the last chunk.
	 * Otherwise, use a monolithic read.
	 */
	if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
	else
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);

	marvell_nfc_send_cmd(chip, &nfc_op);

	/*
	 * According to the datasheet, when reading from NDDB
	 * with BCH enabled, after each 32-byte read, we
	 * have to make sure that the NDSR.RDDREQ bit is set.
	 *
	 * Drain the FIFO, 8 32-bit reads (i.e. 32 bytes) at a time, and skip
	 * the polling on the last read.
	 *
	 * Length is a multiple of 32 bytes, hence it is a multiple of 8 too.
	 */
	for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
		marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				    "RDDREQ while draining FIFO (data)");
		marvell_nfc_xfer_data_in_pio(nfc, data,
					     FIFO_DEPTH * BCH_SEQ_READS);
		data += FIFO_DEPTH * BCH_SEQ_READS;
	}

	for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
		marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				    "RDDREQ while draining FIFO (OOB)");
		marvell_nfc_xfer_data_in_pio(nfc, spare,
					     FIFO_DEPTH * BCH_SEQ_READS);
		spare += FIFO_DEPTH * BCH_SEQ_READS;
	}
}

static int marvell_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd,
					    struct nand_chip *chip,
					    u8 *buf, int oob_required,
					    int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int data_len = lt->data_bytes, spare_len = lt->spare_bytes, ecc_len;
	u8 *data = buf, *spare = chip->oob_poi, *ecc;
	int max_bitflips = 0;
	u32 failure_mask = 0;
	int chunk, ecc_offset_in_page, ret;

	/*
	 * With BCH, OOB is not fully used (and thus not read entirely);
	 * unexpected bytes could show up at the end of the OOB buffer if not
	 * explicitly erased.
	 */
	if (oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	marvell_nfc_enable_hw_ecc(chip);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		/* Update length for the last chunk */
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
		}

		/* Read the chunk and detect number of bitflips */
		marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
						  spare, spare_len, page);
		ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
		if (ret)
			failure_mask |= BIT(chunk);

		data += data_len;
		spare += spare_len;
	}

	marvell_nfc_disable_hw_ecc(chip);

	if (!failure_mask)
		return max_bitflips;

	/*
	 * Please note that dumping the ECC bytes during a normal read with OOB
	 * area would add a significant overhead as ECC bytes are "consumed" by
	 * the controller in normal mode and must be re-read in raw mode. To
	 * avoid degrading performance, we prefer not to include them. The
	 * user should re-read the page in raw mode if ECC bytes are required.
	 *
	 * However, for any subpage read error reported by ->correct(), the ECC
	 * bytes must be read in raw mode and the full subpage must be checked
	 * to see if it is entirely empty or if there was an actual error.
	 */
	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		/* No failure reported for this chunk, move to the next one */
		if (!(failure_mask & BIT(chunk)))
			continue;

		/* Derive ECC bytes positions (in page/buffer) and length */
		ecc = chip->oob_poi +
		      (lt->full_chunk_cnt * lt->spare_bytes) +
		      lt->last_spare_bytes +
		      (chunk * ALIGN(lt->ecc_bytes, 32));
		ecc_offset_in_page =
			(chunk * (lt->data_bytes + lt->spare_bytes +
				  lt->ecc_bytes)) +
			(chunk < lt->full_chunk_cnt ?
			 lt->data_bytes + lt->spare_bytes :
			 lt->last_data_bytes + lt->last_spare_bytes);
		ecc_len = chunk < lt->full_chunk_cnt ?
			  lt->ecc_bytes : lt->last_ecc_bytes;

		/* Do the actual raw read of the ECC bytes */
		nand_change_read_column_op(chip, ecc_offset_in_page,
					   ecc, ecc_len, false);

		/* Derive data/spare bytes positions (in buffer) and length */
		data = buf + (chunk * lt->data_bytes);
		data_len = chunk < lt->full_chunk_cnt ?
			   lt->data_bytes : lt->last_data_bytes;
		spare = chip->oob_poi + (chunk * (lt->spare_bytes +
						  lt->ecc_bytes));
		spare_len = chunk < lt->full_chunk_cnt ?
			    lt->spare_bytes : lt->last_spare_bytes;

		/* Check the entire chunk (data + spare + ecc) for emptiness */
		marvell_nfc_check_empty_chunk(chip, data, data_len, spare,
					      spare_len, ecc, ecc_len,
					      &max_bitflips);
	}

	return max_bitflips;
}

static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct mtd_info *mtd,
					       struct nand_chip *chip, int page)
{
	/* Invalidate page cache */
	chip->pagebuf = -1;

	return chip->ecc.read_page_raw(mtd, chip, chip->data_buf, true, page);
}

static int marvell_nfc_hw_ecc_bch_read_oob(struct mtd_info *mtd,
					   struct nand_chip *chip, int page)
{
	/* Invalidate page cache */
	chip->pagebuf = -1;

	return chip->ecc.read_page(mtd, chip, chip->data_buf, true, page);
}

/* BCH write helpers */
static int marvell_nfc_hw_ecc_bch_write_page_raw(struct mtd_info *mtd,
						 struct nand_chip *chip,
						 const u8 *buf,
						 int oob_required, int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
	int data_len = lt->data_bytes;
	int spare_len = lt->spare_bytes;
	int ecc_len = lt->ecc_bytes;
	int spare_offset = 0;
	int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
			 lt->last_spare_bytes;
	int chunk;

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
			ecc_len = lt->last_ecc_bytes;
		}

		/* Point to the column of the next chunk */
		nand_change_write_column_op(chip, chunk * full_chunk_size,
					    NULL, 0, false);

		/* Write the data */
		nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
				   data_len, false);

		if (!oob_required)
			continue;

		/* Write the spare bytes */
		if (spare_len)
			nand_write_data_op(chip, chip->oob_poi + spare_offset,
					   spare_len, false);

		/* Write the ECC bytes */
		if (ecc_len)
			nand_write_data_op(chip, chip->oob_poi + ecc_offset,
					   ecc_len, false);

		spare_offset += spare_len;
		ecc_offset += ALIGN(ecc_len, 32);
	}

	return nand_prog_page_end_op(chip);
}

static int
marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
				   const u8 *data, unsigned int data_len,
				   const u8 *spare, unsigned int spare_len,
				   int page)
{
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	int ret;
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
		.ndcb[3] = data_len + spare_len,
	};

	/*
	 * The first operation dispatches the SEQIN command, issues the address
	 * cycles and asks for the first chunk of data.
	 * All operations in the middle (if any) will issue a naked write and
	 * also ask for data.
	 * The last operation (if any) asks for the last chunk of data through
	 * a last naked write.
	 */
	if (chunk == 0) {
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
				  NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
				  NDCB0_CMD1(NAND_CMD_SEQIN);
		nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
		nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
	} else if (chunk < lt->nchunks - 1) {
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
	} else {
		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
	}

	/* Always dispatch the PAGEPROG command on the last chunk */
	if (chunk == lt->nchunks - 1)
		nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
				  "WRDREQ while loading FIFO (data)");
	if (ret)
		return ret;

	/* Transfer the contents */
	iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
	iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));

	return 0;
}

static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
					     struct nand_chip *chip,
					     const u8 *buf,
					     int oob_required, int page)
{
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
	const u8 *data = buf;
	const u8 *spare = chip->oob_poi;
	int data_len = lt->data_bytes;
	int spare_len = lt->spare_bytes;
	int chunk, ret;

	/* Spare data will be written anyway, so clear it to avoid garbage */
	if (!oob_required)
		memset(chip->oob_poi, 0xFF, mtd->oobsize);

	marvell_nfc_enable_hw_ecc(chip);

	for (chunk = 0; chunk < lt->nchunks; chunk++) {
		if (chunk >= lt->full_chunk_cnt) {
			data_len = lt->last_data_bytes;
			spare_len = lt->last_spare_bytes;
		}

		marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
						   spare, spare_len, page);
		data += data_len;
		spare += spare_len;

		/*
		 * Waiting only for CMDD or PAGED is not enough: the ECC bytes
		 * are only partially written at this point. No flag is set
		 * once the operation is really finished, but the ND_RUN bit is
		 * cleared, so wait for it before stepping into the next
		 * command.
		 */
		marvell_nfc_wait_ndrun(chip);
	}

	ret = marvell_nfc_wait_op(chip,
				  chip->data_interface.timings.sdr.tPROG_max);

	marvell_nfc_disable_hw_ecc(chip);

	if (ret)
		return ret;

	return 0;
}

static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct mtd_info *mtd,
						struct nand_chip *chip,
						int page)
{
	/* Invalidate page cache */
	chip->pagebuf = -1;

	memset(chip->data_buf, 0xFF, mtd->writesize);

	return chip->ecc.write_page_raw(mtd, chip, chip->data_buf, true, page);
}

static int marvell_nfc_hw_ecc_bch_write_oob(struct mtd_info *mtd,
					    struct nand_chip *chip, int page)
{
	/* Invalidate page cache */
	chip->pagebuf = -1;

	memset(chip->data_buf, 0xFF, mtd->writesize);

	return chip->ecc.write_page(mtd, chip, chip->data_buf, true, page);
}

/* NAND framework ->exec_op() hooks and related helpers */
static void marvell_nfc_parse_instructions(struct nand_chip *chip,
					   const struct nand_subop *subop,
					   struct marvell_nfc_op *nfc_op)
{
	const struct nand_op_instr *instr = NULL;
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	bool first_cmd = true;
	unsigned int op_id;
	int i;

	/* Reset the input structure as most of its fields will be OR'ed */
	memset(nfc_op, 0, sizeof(struct marvell_nfc_op));

	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
		unsigned int offset, naddrs;
		const u8 *addrs;
		int len = nand_subop_get_data_len(subop, op_id);

		instr = &subop->instrs[op_id];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			if (first_cmd)
				nfc_op->ndcb[0] |=
					NDCB0_CMD1(instr->ctx.cmd.opcode);
			else
				nfc_op->ndcb[0] |=
					NDCB0_CMD2(instr->ctx.cmd.opcode) |
					NDCB0_DBC;

			nfc_op->cle_ale_delay_ns = instr->delay_ns;
			first_cmd = false;
			break;

		case NAND_OP_ADDR_INSTR:
			offset = nand_subop_get_addr_start_off(subop, op_id);
			naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
			addrs = &instr->ctx.addr.addrs[offset];

			nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);

			for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
				nfc_op->ndcb[1] |= addrs[i] << (8 * i);

			if (naddrs >= 5)
				nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
			if (naddrs >= 6)
				nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
			if (naddrs == 7)
				nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);

			nfc_op->cle_ale_delay_ns = instr->delay_ns;
			break;

		case NAND_OP_DATA_IN_INSTR:
			nfc_op->data_instr = instr;
			nfc_op->data_instr_idx = op_id;
			nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
			if (nfc->caps->is_nfcv2) {
				nfc_op->ndcb[0] |=
					NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
					NDCB0_LEN_OVRD;
				nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
			}
			nfc_op->data_delay_ns = instr->delay_ns;
			break;

		case NAND_OP_DATA_OUT_INSTR:
			nfc_op->data_instr = instr;
			nfc_op->data_instr_idx = op_id;
			nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
			if (nfc->caps->is_nfcv2) {
				nfc_op->ndcb[0] |=
					NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
					NDCB0_LEN_OVRD;
				nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
			}
			nfc_op->data_delay_ns = instr->delay_ns;
			break;

		case NAND_OP_WAITRDY_INSTR:
			nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
			nfc_op->rdy_delay_ns = instr->delay_ns;
			break;
		}
	}
}

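/*
 * Illustrative example (values assumed): a PAGE READ subop made of
 * CMD(0x00) + 5 ADDR cycles + CMD(0x30) + WAITRDY + DATA_IN would be
 * translated by the parser above into:
 *
 *	ndcb[0] = NDCB0_CMD1(0x00) | NDCB0_CMD2(0x30) | NDCB0_DBC |
 *		  NDCB0_ADDR_CYC(5) | NDCB0_CMD_TYPE(TYPE_READ) | ...;
 *	ndcb[1] = addrs[0] | addrs[1] << 8 | addrs[2] << 16 | addrs[3] << 24;
 *	ndcb[2] = NDCB2_ADDR5_CYC(addrs[4]);
 *
 * with rdy_timeout_ms taken from the WAITRDY instruction.
 */
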
static int marvell_nfc_xfer_data_pio(struct nand_chip *chip,
				     const struct nand_subop *subop,
				     struct marvell_nfc_op *nfc_op)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct nand_op_instr *instr = nfc_op->data_instr;
	unsigned int op_id = nfc_op->data_instr_idx;
	unsigned int len = nand_subop_get_data_len(subop, op_id);
	unsigned int offset = nand_subop_get_data_start_off(subop, op_id);
	bool reading = (instr->type == NAND_OP_DATA_IN_INSTR);
	int ret;

	if (instr->ctx.data.force_8bit)
		marvell_nfc_force_byte_access(chip, true);

	if (reading) {
		u8 *in = instr->ctx.data.buf.in + offset;

		ret = marvell_nfc_xfer_data_in_pio(nfc, in, len);
	} else {
		const u8 *out = instr->ctx.data.buf.out + offset;

		ret = marvell_nfc_xfer_data_out_pio(nfc, out, len);
	}

	if (instr->ctx.data.force_8bit)
		marvell_nfc_force_byte_access(chip, false);

	return ret;
}

static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip,
					      const struct nand_subop *subop)
{
	struct marvell_nfc_op nfc_op;
	bool reading;
	int ret;

	marvell_nfc_parse_instructions(chip, subop, &nfc_op);
	reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);

	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	marvell_nfc_send_cmd(chip, &nfc_op);
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
				  "RDDREQ/WRDREQ while draining raw data");
	if (ret)
		return ret;

	cond_delay(nfc_op.cle_ale_delay_ns);

	if (reading) {
		if (nfc_op.rdy_timeout_ms) {
			ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
			if (ret)
				return ret;
		}

		cond_delay(nfc_op.rdy_delay_ns);
	}

	marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
	ret = marvell_nfc_wait_cmdd(chip);
	if (ret)
		return ret;

	cond_delay(nfc_op.data_delay_ns);

	if (!reading) {
		if (nfc_op.rdy_timeout_ms) {
			ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
			if (ret)
				return ret;
		}

		cond_delay(nfc_op.rdy_delay_ns);
	}

	/*
	 * NDCR ND_RUN bit should be cleared automatically at the end of each
	 * operation but experience shows that the behavior is buggy when it
	 * comes to writes (with LEN_OVRD). Clear it by hand in this case.
	 */
	if (!reading) {
		struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);

		writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
			       nfc->regs + NDCR);
	}

	return 0;
}
1711
1712static int marvell_nfc_naked_access_exec(struct nand_chip *chip,
1713 const struct nand_subop *subop)
1714{
1715 struct marvell_nfc_op nfc_op;
1716 int ret;
1717
1718 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1719
1720 /*
1721 * Naked access are different in that they need to be flagged as naked
1722 * by the controller. Reset the controller registers fields that inform
1723 * on the type and refill them according to the ongoing operation.
1724 */
1725 nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
1726 NDCB0_CMD_XTYPE(XTYPE_MASK));
1727 switch (subop->instrs[0].type) {
1728 case NAND_OP_CMD_INSTR:
1729 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD);
1730 break;
1731 case NAND_OP_ADDR_INSTR:
1732 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR);
1733 break;
1734 case NAND_OP_DATA_IN_INSTR:
1735 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
1736 NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
1737 break;
1738 case NAND_OP_DATA_OUT_INSTR:
1739 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
1740 NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
1741 break;
1742 default:
1743 /* This should never happen */
1744 break;
1745 }
1746
1747 ret = marvell_nfc_prepare_cmd(chip);
1748 if (ret)
1749 return ret;
1750
1751 marvell_nfc_send_cmd(chip, &nfc_op);
1752
1753 if (!nfc_op.data_instr) {
1754 ret = marvell_nfc_wait_cmdd(chip);
1755 cond_delay(nfc_op.cle_ale_delay_ns);
1756 return ret;
1757 }
1758
1759 ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
1760 "RDDREQ/WRDREQ while draining raw data");
1761 if (ret)
1762 return ret;
1763
1764 marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
1765 ret = marvell_nfc_wait_cmdd(chip);
1766 if (ret)
1767 return ret;
1768
1769 /*
1770	 * The NDCR ND_RUN bit should be cleared automatically at the end of
1771	 * each operation, but experience shows that the behavior is buggy for
1772	 * writes (with LEN_OVRD). Clear it by hand in this case.
1773 */
1774 if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) {
1775 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
1776
1777 writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
1778 nfc->regs + NDCR);
1779 }
1780
1781 return 0;
1782}
1783
1784static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip,
1785 const struct nand_subop *subop)
1786{
1787 struct marvell_nfc_op nfc_op;
1788 int ret;
1789
1790 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1791
1792 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
1793 cond_delay(nfc_op.rdy_delay_ns);
1794
1795 return ret;
1796}
1797
1798static int marvell_nfc_read_id_type_exec(struct nand_chip *chip,
1799 const struct nand_subop *subop)
1800{
1801 struct marvell_nfc_op nfc_op;
1802 int ret;
1803
1804 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1805 nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
1806 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID);
1807
1808 ret = marvell_nfc_prepare_cmd(chip);
1809 if (ret)
1810 return ret;
1811
1812 marvell_nfc_send_cmd(chip, &nfc_op);
1813 ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
1814 "RDDREQ while reading ID");
1815 if (ret)
1816 return ret;
1817
1818 cond_delay(nfc_op.cle_ale_delay_ns);
1819
1820 if (nfc_op.rdy_timeout_ms) {
1821 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
1822 if (ret)
1823 return ret;
1824 }
1825
1826 cond_delay(nfc_op.rdy_delay_ns);
1827
1828 marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
1829 ret = marvell_nfc_wait_cmdd(chip);
1830 if (ret)
1831 return ret;
1832
1833 cond_delay(nfc_op.data_delay_ns);
1834
1835 return 0;
1836}
1837
1838static int marvell_nfc_read_status_exec(struct nand_chip *chip,
1839 const struct nand_subop *subop)
1840{
1841 struct marvell_nfc_op nfc_op;
1842 int ret;
1843
1844 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1845 nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
1846 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS);
1847
1848 ret = marvell_nfc_prepare_cmd(chip);
1849 if (ret)
1850 return ret;
1851
1852 marvell_nfc_send_cmd(chip, &nfc_op);
1853 ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
1854 "RDDREQ while reading status");
1855 if (ret)
1856 return ret;
1857
1858 cond_delay(nfc_op.cle_ale_delay_ns);
1859
1860 if (nfc_op.rdy_timeout_ms) {
1861 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
1862 if (ret)
1863 return ret;
1864 }
1865
1866 cond_delay(nfc_op.rdy_delay_ns);
1867
1868 marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
1869 ret = marvell_nfc_wait_cmdd(chip);
1870 if (ret)
1871 return ret;
1872
1873 cond_delay(nfc_op.data_delay_ns);
1874
1875 return 0;
1876}
1877
1878static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip,
1879 const struct nand_subop *subop)
1880{
1881 struct marvell_nfc_op nfc_op;
1882 int ret;
1883
1884 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1885 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET);
1886
1887 ret = marvell_nfc_prepare_cmd(chip);
1888 if (ret)
1889 return ret;
1890
1891 marvell_nfc_send_cmd(chip, &nfc_op);
1892 ret = marvell_nfc_wait_cmdd(chip);
1893 if (ret)
1894 return ret;
1895
1896 cond_delay(nfc_op.cle_ale_delay_ns);
1897
1898 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
1899 if (ret)
1900 return ret;
1901
1902 cond_delay(nfc_op.rdy_delay_ns);
1903
1904 return 0;
1905}
1906
1907static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip,
1908 const struct nand_subop *subop)
1909{
1910 struct marvell_nfc_op nfc_op;
1911 int ret;
1912
1913 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1914 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE);
1915
1916 ret = marvell_nfc_prepare_cmd(chip);
1917 if (ret)
1918 return ret;
1919
1920 marvell_nfc_send_cmd(chip, &nfc_op);
1921 ret = marvell_nfc_wait_cmdd(chip);
1922 if (ret)
1923 return ret;
1924
1925 cond_delay(nfc_op.cle_ale_delay_ns);
1926
1927 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
1928 if (ret)
1929 return ret;
1930
1931 cond_delay(nfc_op.rdy_delay_ns);
1932
1933 return 0;
1934}
1935
1936static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER(
1937 /* Monolithic reads/writes */
1938 NAND_OP_PARSER_PATTERN(
1939 marvell_nfc_monolithic_access_exec,
1940 NAND_OP_PARSER_PAT_CMD_ELEM(false),
1941 NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2),
1942 NAND_OP_PARSER_PAT_CMD_ELEM(true),
1943 NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
1944 NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
1945 NAND_OP_PARSER_PATTERN(
1946 marvell_nfc_monolithic_access_exec,
1947 NAND_OP_PARSER_PAT_CMD_ELEM(false),
1948 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2),
1949 NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE),
1950 NAND_OP_PARSER_PAT_CMD_ELEM(true),
1951 NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
1952 /* Naked commands */
1953 NAND_OP_PARSER_PATTERN(
1954 marvell_nfc_naked_access_exec,
1955 NAND_OP_PARSER_PAT_CMD_ELEM(false)),
1956 NAND_OP_PARSER_PATTERN(
1957 marvell_nfc_naked_access_exec,
1958 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)),
1959 NAND_OP_PARSER_PATTERN(
1960 marvell_nfc_naked_access_exec,
1961 NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
1962 NAND_OP_PARSER_PATTERN(
1963 marvell_nfc_naked_access_exec,
1964 NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)),
1965 NAND_OP_PARSER_PATTERN(
1966 marvell_nfc_naked_waitrdy_exec,
1967 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
1968 );
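/*
 * Illustration of how the parser above is used. The exact NAND_OP_*
 * helper signatures sketched below come from the exec_op framework and
 * are assumptions here, not definitions from this file. A generic page
 * read issued by the core as:
 *
 *	struct nand_op_instr instrs[] = {
 *		NAND_OP_CMD(NAND_CMD_READ0, 0),
 *		NAND_OP_ADDR(5, addrs, 0),
 *		NAND_OP_CMD(NAND_CMD_READSTART, 0),
 *		NAND_OP_WAIT_RDY(tR_ms, 0),
 *		NAND_OP_DATA_IN(len, buf, 0),
 *	};
 *
 * matches the first monolithic pattern of marvell_nfcv2_op_parser
 * (CMD/ADDR/CMD/WAITRDY/DATA_IN) and is executed in one shot by
 * marvell_nfc_monolithic_access_exec().
 */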
1969
1970static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER(
1971	/* Naked commands are not supported, use one function per pattern */
1972 NAND_OP_PARSER_PATTERN(
1973 marvell_nfc_read_id_type_exec,
1974 NAND_OP_PARSER_PAT_CMD_ELEM(false),
1975 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
1976 NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
1977 NAND_OP_PARSER_PATTERN(
1978 marvell_nfc_erase_cmd_type_exec,
1979 NAND_OP_PARSER_PAT_CMD_ELEM(false),
1980 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
1981 NAND_OP_PARSER_PAT_CMD_ELEM(false),
1982 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
1983 NAND_OP_PARSER_PATTERN(
1984 marvell_nfc_read_status_exec,
1985 NAND_OP_PARSER_PAT_CMD_ELEM(false),
1986 NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
1987 NAND_OP_PARSER_PATTERN(
1988 marvell_nfc_reset_cmd_type_exec,
1989 NAND_OP_PARSER_PAT_CMD_ELEM(false),
1990 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
1991 NAND_OP_PARSER_PATTERN(
1992 marvell_nfc_naked_waitrdy_exec,
1993 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
1994 );
1995
1996static int marvell_nfc_exec_op(struct nand_chip *chip,
1997 const struct nand_operation *op,
1998 bool check_only)
1999{
2000 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
2001
2002 if (nfc->caps->is_nfcv2)
2003 return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
2004 op, check_only);
2005 else
2006 return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser,
2007 op, check_only);
2008}
2009
2010/*
2011 * Layouts were broken in the old pxa3xx_nand driver; these are supposed
2012 * to be usable.
2013 */
2014static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2015 struct mtd_oob_region *oobregion)
2016{
2017 struct nand_chip *chip = mtd_to_nand(mtd);
2018 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
2019
2020 if (section)
2021 return -ERANGE;
2022
2023 oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) +
2024 lt->last_ecc_bytes;
2025 oobregion->offset = mtd->oobsize - oobregion->length;
2026
2027 return 0;
2028}
2029
2030static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section,
2031 struct mtd_oob_region *oobregion)
2032{
2033 struct nand_chip *chip = mtd_to_nand(mtd);
2034 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
2035
2036 if (section)
2037 return -ERANGE;
2038
2039 /*
2040	 * The bootROM looks at bytes 0 and 5 for bad block markers with the
2041	 * 4KB page / 4-bit BCH combination.
2042 */
2043 if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K)
2044 oobregion->offset = 6;
2045 else
2046 oobregion->offset = 2;
2047
2048 oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) +
2049 lt->last_spare_bytes - oobregion->offset;
2050
2051 return 0;
2052}
2053
2054static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
2055 .ecc = marvell_nand_ooblayout_ecc,
2056 .free = marvell_nand_ooblayout_free,
2057};
2058
2059static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
2060 struct nand_ecc_ctrl *ecc)
2061{
2062 struct nand_chip *chip = mtd_to_nand(mtd);
2063 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
2064 const struct marvell_hw_ecc_layout *l;
2065 int i;
2066
2067 if (!nfc->caps->is_nfcv2 &&
2068 (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) {
2069 dev_err(nfc->dev,
2070 "NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n",
2071 mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize);
2072 return -ENOTSUPP;
2073 }
2074
2075 to_marvell_nand(chip)->layout = NULL;
2076 for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
2077 l = &marvell_nfc_layouts[i];
2078 if (mtd->writesize == l->writesize &&
2079 ecc->size == l->chunk && ecc->strength == l->strength) {
2080 to_marvell_nand(chip)->layout = l;
2081 break;
2082 }
2083 }
2084
2085 if (!to_marvell_nand(chip)->layout ||
2086 (!nfc->caps->is_nfcv2 && ecc->strength > 1)) {
2087 dev_err(nfc->dev,
2088 "ECC strength %d at page size %d is not supported\n",
2089 ecc->strength, mtd->writesize);
2090 return -ENOTSUPP;
2091 }
2092
2093 mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
2094 ecc->steps = l->nchunks;
2095 ecc->size = l->data_bytes;
2096
2097 if (ecc->strength == 1) {
2098 chip->ecc.algo = NAND_ECC_HAMMING;
2099 ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
2100 ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
2101 ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
2102 ecc->read_oob = ecc->read_oob_raw;
2103 ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw;
2104 ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page;
2105 ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
2106 ecc->write_oob = ecc->write_oob_raw;
2107 } else {
2108 chip->ecc.algo = NAND_ECC_BCH;
2109 ecc->strength = 16;
2110 ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
2111 ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
2112 ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw;
2113 ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob;
2114 ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw;
2115 ecc->write_page = marvell_nfc_hw_ecc_bch_write_page;
2116 ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw;
2117 ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob;
2118 }
2119
2120 return 0;
2121}
2122
2123static int marvell_nand_ecc_init(struct mtd_info *mtd,
2124 struct nand_ecc_ctrl *ecc)
2125{
2126 struct nand_chip *chip = mtd_to_nand(mtd);
2127 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
2128 int ret;
2129
2130 if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
2131 if (chip->ecc_step_ds && chip->ecc_strength_ds) {
2132 ecc->size = chip->ecc_step_ds;
2133 ecc->strength = chip->ecc_strength_ds;
2134 } else {
2135 dev_info(nfc->dev,
2136 "No minimum ECC strength, using 1b/512B\n");
2137 ecc->size = 512;
2138 ecc->strength = 1;
2139 }
2140 }
2141
2142 switch (ecc->mode) {
2143 case NAND_ECC_HW:
2144 ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc);
2145 if (ret)
2146 return ret;
2147 break;
2148 case NAND_ECC_NONE:
2149 case NAND_ECC_SOFT:
2150 if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
2151 mtd->writesize != SZ_2K) {
2152			dev_err(nfc->dev, "NFCv1 cannot write %d-byte pages\n",
2153 mtd->writesize);
2154 return -EINVAL;
2155 }
2156 break;
2157 default:
2158 return -EINVAL;
2159 }
2160
2161 return 0;
2162}
2163
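/*
 * In-flash bad block table descriptors: the main table is identified by
 * the "MVBbt0" pattern stored at OOB offset 8 (6 bytes, version byte at
 * offset 14), the mirror table by the reversed pattern. Both are kept in
 * the last 8 blocks of each chip.
 */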
2164static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
2165static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
2166
2167static struct nand_bbt_descr bbt_main_descr = {
2168 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
2169 NAND_BBT_2BIT | NAND_BBT_VERSION,
2170 .offs = 8,
2171 .len = 6,
2172 .veroffs = 14,
2173 .maxblocks = 8, /* Last 8 blocks in each chip */
2174 .pattern = bbt_pattern
2175};
2176
2177static struct nand_bbt_descr bbt_mirror_descr = {
2178 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
2179 NAND_BBT_2BIT | NAND_BBT_VERSION,
2180 .offs = 8,
2181 .len = 6,
2182 .veroffs = 14,
2183 .maxblocks = 8, /* Last 8 blocks in each chip */
2184 .pattern = bbt_mirror_pattern
2185};
2186
2187static int marvell_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
2188 const struct nand_data_interface
2189 *conf)
2190{
2191 struct nand_chip *chip = mtd_to_nand(mtd);
2192 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
2193 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
2194 unsigned int period_ns = 1000000000 / clk_get_rate(nfc->ecc_clk) * 2;
2195 const struct nand_sdr_timings *sdr;
2196 struct marvell_nfc_timings nfc_tmg;
2197 int read_delay;
2198
2199 sdr = nand_get_sdr_timings(conf);
2200 if (IS_ERR(sdr))
2201 return PTR_ERR(sdr);
2202
2203 /*
2204	 * SDR timings are given in picoseconds while NFC timings must be
2205	 * expressed in NAND controller clock cycles; the controller runs at
2206	 * half the frequency of the accessible ECC clock retrieved by
2207	 * clk_get_rate(). This is not written anywhere in the datasheet but
2208	 * was observed with an oscilloscope.
2209	 *
2210	 * The NFC datasheet gives equations from which these calculations are
2211	 * derived. They tend to be slightly more restrictive than the given
2212	 * core timings and may improve the overall speed.
2213 */
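	/*
	 * Worked example (illustrative, assuming TO_CYCLES() rounds a
	 * picosecond value up to units of period_ns): with a 250 MHz ECC
	 * clock, period_ns = 1000000000 / 250000000 * 2 = 8 ns per
	 * controller cycle, so a NAND with tRC_min = 25000 ps gives
	 * tRP = TO_CYCLES(DIV_ROUND_UP(25000, 2), 8) - 1 = 2 - 1 = 1.
	 */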
2214 nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
2215 nfc_tmg.tRH = nfc_tmg.tRP;
2216 nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
2217 nfc_tmg.tWH = nfc_tmg.tWP;
2218 nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
2219 nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
2220 nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
2221 /*
2222 * Read delay is the time of propagation from SoC pins to NFC internal
2223 * logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles. In
2224 * EDO mode, an additional delay of tRH must be taken into account so
2225 * the data is sampled on the falling edge instead of the rising edge.
2226 */
2227 read_delay = sdr->tRC_min >= 30000 ?
2228 MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;
2229
2230 nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
2231 /*
2232	 * tWHR and tRHW are supposed to be read-to-write delays (and vice
2233	 * versa) but in some cases, e.g. when doing a change column, they must
2234	 * be greater than that to make sure the tCCS delay is respected.
2235 */
2236 nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
2237				 period_ns) - 2;
2238 nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
2239 period_ns);
2240
2241	/*
2242 * NFCv2: Use WAIT_MODE (wait for RB line), do not rely only on delays.
2243 * NFCv1: No WAIT_MODE, tR must be maximal.
2244 */
2245 if (nfc->caps->is_nfcv2) {
2246 nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
2247 } else {
2248 nfc_tmg.tR = TO_CYCLES64(sdr->tWB_max + sdr->tR_max,
2249 period_ns);
2250 if (nfc_tmg.tR + 3 > nfc_tmg.tCH)
2251 nfc_tmg.tR = nfc_tmg.tCH - 3;
2252 else
2253 nfc_tmg.tR = 0;
2254 }
2255
2256 if (chipnr < 0)
2257 return 0;
2258
2259 marvell_nand->ndtr0 =
2260 NDTR0_TRP(nfc_tmg.tRP) |
2261 NDTR0_TRH(nfc_tmg.tRH) |
2262 NDTR0_ETRP(nfc_tmg.tRP) |
2263 NDTR0_TWP(nfc_tmg.tWP) |
2264 NDTR0_TWH(nfc_tmg.tWH) |
2265 NDTR0_TCS(nfc_tmg.tCS) |
2266		NDTR0_TCH(nfc_tmg.tCH);
2267
2268 marvell_nand->ndtr1 =
2269 NDTR1_TAR(nfc_tmg.tAR) |
2270 NDTR1_TWHR(nfc_tmg.tWHR) |
2271		NDTR1_TR(nfc_tmg.tR);
2272
2273	if (nfc->caps->is_nfcv2) {
2274 marvell_nand->ndtr0 |=
2275 NDTR0_RD_CNT_DEL(read_delay) |
2276 NDTR0_SELCNTR |
2277 NDTR0_TADL(nfc_tmg.tADL);
2278
2279 marvell_nand->ndtr1 |=
2280 NDTR1_TRHW(nfc_tmg.tRHW) |
2281 NDTR1_WAIT_MODE;
2282 }
2283
2284	return 0;
2285}
2286
2287static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
2288 struct device_node *np)
2289{
2290 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev);
2291 struct marvell_nand_chip *marvell_nand;
2292 struct mtd_info *mtd;
2293 struct nand_chip *chip;
2294 int nsels, ret, i;
2295 u32 cs, rb;
2296
2297 /*
2298	 * The legacy "num-cs" property indicates the number of CS lines on
2299	 * the only chip connected to the controller (legacy bindings do not
2300	 * support more than one chip). CS lines are numbered sequentially and
2301	 * the RB pin is always #0.
2302	 *
2303	 * When not using legacy bindings, the "reg" and "nand-rb" properties
2304	 * must both be filled in. For each chip, expressed as a subnode,
2305	 * "reg" points to the CS lines and "nand-rb" to the RB line.
2306 */
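	/*
	 * A hypothetical chip subnode using the non-legacy bindings could
	 * look like this (values are illustrative, not from a real board):
	 *
	 *	nand@0 {
	 *		reg = <0>;
	 *		nand-rb = <0>;
	 *		label = "main-storage";
	 *	};
	 */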
2307 if (pdata) {
2308 nsels = 1;
2309 } else if (nfc->caps->legacy_of_bindings &&
2310 !of_get_property(np, "num-cs", &nsels)) {
2311 dev_err(dev, "missing num-cs property\n");
2312 return -EINVAL;
2313 } else if (!of_get_property(np, "reg", &nsels)) {
2314 dev_err(dev, "missing reg property\n");
2315 return -EINVAL;
2316 }
2317
2318 if (!pdata)
2319 nsels /= sizeof(u32);
2320 if (!nsels) {
2321 dev_err(dev, "invalid reg property size\n");
2322 return -EINVAL;
2323 }
2324
2325 /* Alloc the nand chip structure */
2326 marvell_nand = devm_kzalloc(dev, sizeof(*marvell_nand) +
2327 (nsels *
2328 sizeof(struct marvell_nand_chip_sel)),
2329 GFP_KERNEL);
2330 if (!marvell_nand) {
2331 dev_err(dev, "could not allocate chip structure\n");
2332 return -ENOMEM;
2333 }
2334
2335 marvell_nand->nsels = nsels;
2336 marvell_nand->selected_die = -1;
2337
2338 for (i = 0; i < nsels; i++) {
2339 if (pdata || nfc->caps->legacy_of_bindings) {
2340 /*
2341 * Legacy bindings use the CS lines in natural
2342 * order (0, 1, ...)
2343 */
2344 cs = i;
2345 } else {
2346 /* Retrieve CS id */
2347 ret = of_property_read_u32_index(np, "reg", i, &cs);
2348 if (ret) {
2349 dev_err(dev, "could not retrieve reg property: %d\n",
2350 ret);
2351 return ret;
2352 }
2353 }
2354
2355 if (cs >= nfc->caps->max_cs_nb) {
2356 dev_err(dev, "invalid reg value: %u (max CS = %d)\n",
2357 cs, nfc->caps->max_cs_nb);
2358 return -EINVAL;
2359 }
2360
2361 if (test_and_set_bit(cs, &nfc->assigned_cs)) {
2362 dev_err(dev, "CS %d already assigned\n", cs);
2363 return -EINVAL;
2364 }
2365
2366 /*
2367 * The cs variable represents the chip select id, which must be
2368		 * converted into bit fields for NDCB0 and NDCB2 to select the
2369		 * right chip. Unfortunately, due to a lack of information on
2370		 * the subject and inconsistent documentation, the user should not
2371		 * use CS1 and CS3 at all, as asserting them is not supported in
2372		 * a reliable way (due to multiplexing inside the ADDR5 field).
2373 */
2374 marvell_nand->sels[i].cs = cs;
2375 switch (cs) {
2376 case 0:
2377 case 2:
2378 marvell_nand->sels[i].ndcb0_csel = 0;
2379 break;
2380 case 1:
2381 case 3:
2382 marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL;
2383 break;
2384 default:
2385 return -EINVAL;
2386 }
2387
2388 /* Retrieve RB id */
2389 if (pdata || nfc->caps->legacy_of_bindings) {
2390 /* Legacy bindings always use RB #0 */
2391 rb = 0;
2392 } else {
2393 ret = of_property_read_u32_index(np, "nand-rb", i,
2394 &rb);
2395 if (ret) {
2396 dev_err(dev,
2397 "could not retrieve RB property: %d\n",
2398 ret);
2399 return ret;
2400 }
2401 }
2402
2403 if (rb >= nfc->caps->max_rb_nb) {
2404 dev_err(dev, "invalid reg value: %u (max RB = %d)\n",
2405 rb, nfc->caps->max_rb_nb);
2406 return -EINVAL;
2407 }
2408
2409 marvell_nand->sels[i].rb = rb;
2410 }
2411
2412 chip = &marvell_nand->chip;
2413 chip->controller = &nfc->controller;
2414 nand_set_flash_node(chip, np);
2415
2416 chip->exec_op = marvell_nfc_exec_op;
2417 chip->select_chip = marvell_nfc_select_chip;
2418	if (!of_property_read_bool(np, "marvell,nand-keep-config"))
2419		chip->setup_data_interface = marvell_nfc_setup_data_interface;
2420
2421 mtd = nand_to_mtd(chip);
2422 mtd->dev.parent = dev;
2423
2424 /*
2425 * Default to HW ECC engine mode. If the nand-ecc-mode property is given
2426 * in the DT node, this entry will be overwritten in nand_scan_ident().
2427 */
2428 chip->ecc.mode = NAND_ECC_HW;
2429
2430 /*
2431 * Save a reference value for timing registers before
2432 * ->setup_data_interface() is called.
2433 */
2434 marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
2435 marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
2436
2437 chip->options |= NAND_BUSWIDTH_AUTO;
2438 ret = nand_scan_ident(mtd, marvell_nand->nsels, NULL);
2439 if (ret) {
2440 dev_err(dev, "could not identify the nand chip\n");
2441 return ret;
2442 }
2443
2444 if (pdata && pdata->flash_bbt)
2445 chip->bbt_options |= NAND_BBT_USE_FLASH;
2446
2447 if (chip->bbt_options & NAND_BBT_USE_FLASH) {
2448 /*
2449 * We'll use a bad block table stored in-flash and don't
2450 * allow writing the bad block marker to the flash.
2451 */
2452 chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
2453 chip->bbt_td = &bbt_main_descr;
2454 chip->bbt_md = &bbt_mirror_descr;
2455 }
2456
2457 /* Save the chip-specific fields of NDCR */
2458 marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
2459 if (chip->options & NAND_BUSWIDTH_16)
2460 marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
2461
2462 /*
2463 * On small page NANDs, only one cycle is needed to pass the
2464 * column address.
2465 */
2466 if (mtd->writesize <= 512) {
2467 marvell_nand->addr_cyc = 1;
2468 } else {
2469 marvell_nand->addr_cyc = 2;
2470 marvell_nand->ndcr |= NDCR_RA_START;
2471 }
2472
2473 /*
2474 * Now add the number of cycles needed to pass the row
2475 * address.
2476 *
2477	 * Addressing a chip using CS 2 or 3 should also need the third row
2478	 * cycle, but due to inconsistencies in the documentation and the lack
2479	 * of hardware to test this situation, this case is not supported.
2480 */
2481 if (chip->options & NAND_ROW_ADDR_3)
2482 marvell_nand->addr_cyc += 3;
2483 else
2484 marvell_nand->addr_cyc += 2;
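	/*
	 * For instance, a large page chip (2 column cycles) with
	 * NAND_ROW_ADDR_3 set ends up with 2 + 3 = 5 address cycles.
	 */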
2485
2486 if (pdata) {
2487 chip->ecc.size = pdata->ecc_step_size;
2488 chip->ecc.strength = pdata->ecc_strength;
2489 }
2490
2491 ret = marvell_nand_ecc_init(mtd, &chip->ecc);
2492 if (ret) {
2493 dev_err(dev, "ECC init failed: %d\n", ret);
2494 return ret;
2495 }
2496
2497 if (chip->ecc.mode == NAND_ECC_HW) {
2498 /*
2499		 * Subpage write is not available with hardware ECC; also
2500		 * prohibit subpage read, as subpage access would otherwise
2501		 * still be allowed from userspace and subpage writes, if
2502		 * used, would lead to numerous uncorrectable ECC errors.
2503 */
2504 chip->options |= NAND_NO_SUBPAGE_WRITE;
2505 }
2506
2507 if (pdata || nfc->caps->legacy_of_bindings) {
2508 /*
2509 * We keep the MTD name unchanged to avoid breaking platforms
2510 * where the MTD cmdline parser is used and the bootloader
2511 * has not been updated to use the new naming scheme.
2512 */
2513 mtd->name = "pxa3xx_nand-0";
2514 } else if (!mtd->name) {
2515 /*
2516 * If the new bindings are used and the bootloader has not been
2517 * updated to pass a new mtdparts parameter on the cmdline, you
2518		 * should define the following property in your NAND node, e.g.:
2519 *
2520 * label = "main-storage";
2521 *
2522 * This way, mtd->name will be set by the core when
2523 * nand_set_flash_node() is called.
2524 */
2525 mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
2526 "%s:nand.%d", dev_name(nfc->dev),
2527 marvell_nand->sels[0].cs);
2528 if (!mtd->name) {
2529 dev_err(nfc->dev, "Failed to allocate mtd->name\n");
2530 return -ENOMEM;
2531 }
2532 }
2533
2534 ret = nand_scan_tail(mtd);
2535 if (ret) {
2536 dev_err(dev, "nand_scan_tail failed: %d\n", ret);
2537 return ret;
2538 }
2539
2540 if (pdata)
2541 /* Legacy bindings support only one chip */
2542 ret = mtd_device_register(mtd, pdata->parts[0],
2543 pdata->nr_parts[0]);
2544 else
2545 ret = mtd_device_register(mtd, NULL, 0);
2546 if (ret) {
2547 dev_err(dev, "failed to register mtd device: %d\n", ret);
2548 nand_release(mtd);
2549 return ret;
2550 }
2551
2552 list_add_tail(&marvell_nand->node, &nfc->chips);
2553
2554 return 0;
2555}
2556
2557static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
2558{
2559 struct device_node *np = dev->of_node;
2560 struct device_node *nand_np;
2561 int max_cs = nfc->caps->max_cs_nb;
2562 int nchips;
2563 int ret;
2564
2565 if (!np)
2566 nchips = 1;
2567 else
2568 nchips = of_get_child_count(np);
2569
2570 if (nchips > max_cs) {
2571 dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips,
2572 max_cs);
2573 return -EINVAL;
2574 }
2575
2576 /*
2577	 * Legacy bindings do not use child nodes to describe NAND chip
2578 * properties and layout. Instead, NAND properties are mixed with the
2579 * controller ones, and partitions are defined as direct subnodes of the
2580 * NAND controller node.
2581 */
2582 if (nfc->caps->legacy_of_bindings) {
2583 ret = marvell_nand_chip_init(dev, nfc, np);
2584 return ret;
2585 }
2586
2587 for_each_child_of_node(np, nand_np) {
2588 ret = marvell_nand_chip_init(dev, nfc, nand_np);
2589 if (ret) {
2590 of_node_put(nand_np);
2591 return ret;
2592 }
2593 }
2594
2595 return 0;
2596}
2597
2598static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
2599{
2600 struct marvell_nand_chip *entry, *temp;
2601
2602 list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
2603 nand_release(nand_to_mtd(&entry->chip));
2604 list_del(&entry->node);
2605 }
2606}
2607
2608static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
2609{
2610 struct platform_device *pdev = container_of(nfc->dev,
2611 struct platform_device,
2612 dev);
2613 struct dma_slave_config config = {};
2614 struct resource *r;
2615 dma_cap_mask_t mask;
2616 struct pxad_param param;
2617 int ret;
2618
2619 if (!IS_ENABLED(CONFIG_PXA_DMA)) {
2620 dev_warn(nfc->dev,
2621 "DMA not enabled in configuration\n");
2622 return -ENOTSUPP;
2623 }
2624
2625 ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32));
2626 if (ret)
2627 return ret;
2628
2629 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
2630 if (!r) {
2631 dev_err(nfc->dev, "No resource defined for data DMA\n");
2632 return -ENXIO;
2633 }
2634
2635 param.drcmr = r->start;
2636 param.prio = PXAD_PRIO_LOWEST;
2637 dma_cap_zero(mask);
2638 dma_cap_set(DMA_SLAVE, mask);
2639 nfc->dma_chan =
2640 dma_request_slave_channel_compat(mask, pxad_filter_fn,
2641 &param, nfc->dev,
2642 "data");
2643 if (!nfc->dma_chan) {
2644 dev_err(nfc->dev,
2645 "Unable to request data DMA channel\n");
2646 return -ENODEV;
2647 }
2648
2649 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2650 if (!r)
2651 return -ENXIO;
2652
2653 config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2654 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2655 config.src_addr = r->start + NDDB;
2656 config.dst_addr = r->start + NDDB;
2657 config.src_maxburst = 32;
2658 config.dst_maxburst = 32;
2659 ret = dmaengine_slave_config(nfc->dma_chan, &config);
2660 if (ret < 0) {
2661 dev_err(nfc->dev, "Failed to configure DMA channel\n");
2662 return ret;
2663 }
2664
2665 /*
2666	 * DMA transfers must have a length that is a multiple of 32 and this
2667	 * length may be bigger than the destination buffer. Use this bounce
2668	 * buffer for DMA transfers instead, then copy the desired amount of
2669	 * data to the provided buffer.
2670 */
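	/*
	 * For instance (illustrative numbers only), reading a handful of
	 * ID bytes through DMA is rounded up to a 32-byte transfer into
	 * dma_buf, from which only the requested bytes are copied back.
	 */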
2671	nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
2672	if (!nfc->dma_buf)
2673 return -ENOMEM;
2674
2675 nfc->use_dma = true;
2676
2677 return 0;
2678}
2679
2680static int marvell_nfc_init(struct marvell_nfc *nfc)
2681{
2682 struct device_node *np = nfc->dev->of_node;
2683
2684 /*
2685	 * Some SoCs like A7k/A8k need the NAND controller, gated clocks and
2686	 * reset bits to be enabled manually to avoid depending on the
2687	 * bootloader. This is done through the use of the System Functions
2688	 * registers.
2689 */
2690 if (nfc->caps->need_system_controller) {
2691 struct regmap *sysctrl_base =
2692 syscon_regmap_lookup_by_phandle(np,
2693 "marvell,system-controller");
2694 u32 reg;
2695
2696 if (IS_ERR(sysctrl_base))
2697 return PTR_ERR(sysctrl_base);
2698
2699 reg = GENCONF_SOC_DEVICE_MUX_NFC_EN |
2700 GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
2701 GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
2702 GENCONF_SOC_DEVICE_MUX_NFC_INT_EN;
2703 regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
2704
2705 regmap_read(sysctrl_base, GENCONF_CLK_GATING_CTRL, &reg);
2706 reg |= GENCONF_CLK_GATING_CTRL_ND_GATE;
2707 regmap_write(sysctrl_base, GENCONF_CLK_GATING_CTRL, reg);
2708
2709 regmap_read(sysctrl_base, GENCONF_ND_CLK_CTRL, &reg);
2710 reg |= GENCONF_ND_CLK_CTRL_EN;
2711 regmap_write(sysctrl_base, GENCONF_ND_CLK_CTRL, reg);
2712 }
2713
2714 /* Configure the DMA if appropriate */
2715 if (!nfc->caps->is_nfcv2)
2716 marvell_nfc_init_dma(nfc);
2717
2718 /*
2719	 * ECC operations and interrupts are only enabled when specifically
2720	 * needed. ECC shall not be activated in the early stages (it makes
2721	 * probe fail). The arbiter flag, even if marked "reserved", must be
2722	 * set (empirical). The SPARE_EN bit must always be set or the ECC
2723	 * bytes will shift in the read page and break the protection.
2724 */
2725 writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
2726 NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
2727 writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
2728 writel_relaxed(0, nfc->regs + NDECCCTRL);
2729
2730 return 0;
2731}
2732
2733static int marvell_nfc_probe(struct platform_device *pdev)
2734{
2735 struct device *dev = &pdev->dev;
2736 struct resource *r;
2737 struct marvell_nfc *nfc;
2738 int ret;
2739 int irq;
2740
2741 nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc),
2742 GFP_KERNEL);
2743 if (!nfc)
2744 return -ENOMEM;
2745
2746 nfc->dev = dev;
2747 nand_hw_control_init(&nfc->controller);
2748 INIT_LIST_HEAD(&nfc->chips);
2749
2750 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2751 nfc->regs = devm_ioremap_resource(dev, r);
2752 if (IS_ERR(nfc->regs))
2753 return PTR_ERR(nfc->regs);
2754
2755 irq = platform_get_irq(pdev, 0);
2756 if (irq < 0) {
2757 dev_err(dev, "failed to retrieve irq\n");
2758 return irq;
2759 }
2760
2761 nfc->ecc_clk = devm_clk_get(&pdev->dev, NULL);
2762 if (IS_ERR(nfc->ecc_clk))
2763 return PTR_ERR(nfc->ecc_clk);
2764
2765 ret = clk_prepare_enable(nfc->ecc_clk);
2766 if (ret)
2767 return ret;
2768
2769 marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
2770 marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
2771 ret = devm_request_irq(dev, irq, marvell_nfc_isr,
2772 0, "marvell-nfc", nfc);
2773 if (ret)
2774 goto unprepare_clk;
2775
2776 /* Get NAND controller capabilities */
2777 if (pdev->id_entry)
2778 nfc->caps = (void *)pdev->id_entry->driver_data;
2779 else
2780 nfc->caps = of_device_get_match_data(&pdev->dev);
2781
2782 if (!nfc->caps) {
2783 dev_err(dev, "Could not retrieve NFC caps\n");
2784 ret = -EINVAL;
2785 goto unprepare_clk;
2786 }
2787
2788 /* Init the controller and then probe the chips */
2789 ret = marvell_nfc_init(nfc);
2790 if (ret)
2791 goto unprepare_clk;
2792
2793 platform_set_drvdata(pdev, nfc);
2794
2795 ret = marvell_nand_chips_init(dev, nfc);
2796 if (ret)
2797 goto unprepare_clk;
2798
2799 return 0;
2800
2801unprepare_clk:
2802 clk_disable_unprepare(nfc->ecc_clk);
2803
2804 return ret;
2805}
2806
2807static int marvell_nfc_remove(struct platform_device *pdev)
2808{
2809 struct marvell_nfc *nfc = platform_get_drvdata(pdev);
2810
2811 marvell_nand_chips_cleanup(nfc);
2812
2813 if (nfc->use_dma) {
2814 dmaengine_terminate_all(nfc->dma_chan);
2815 dma_release_channel(nfc->dma_chan);
2816 }
2817
2818 clk_disable_unprepare(nfc->ecc_clk);
2819
2820 return 0;
2821}
2822
2823static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
2824 .max_cs_nb = 4,
2825 .max_rb_nb = 2,
2826 .need_system_controller = true,
2827 .is_nfcv2 = true,
2828};
2829
2830static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
2831 .max_cs_nb = 4,
2832 .max_rb_nb = 2,
2833 .is_nfcv2 = true,
2834};
2835
2836static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = {
2837 .max_cs_nb = 2,
2838 .max_rb_nb = 1,
2839 .use_dma = true,
2840};
2841
2842static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = {
2843 .max_cs_nb = 4,
2844 .max_rb_nb = 2,
2845 .need_system_controller = true,
2846 .legacy_of_bindings = true,
2847 .is_nfcv2 = true,
2848};
2849
2850static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = {
2851 .max_cs_nb = 4,
2852 .max_rb_nb = 2,
2853 .legacy_of_bindings = true,
2854 .is_nfcv2 = true,
2855};
2856
2857static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = {
2858 .max_cs_nb = 2,
2859 .max_rb_nb = 1,
2860 .legacy_of_bindings = true,
2861 .use_dma = true,
2862};
2863
2864static const struct platform_device_id marvell_nfc_platform_ids[] = {
2865 {
2866 .name = "pxa3xx-nand",
2867 .driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps,
2868 },
2869 { /* sentinel */ },
2870};
2871MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids);
2872
2873static const struct of_device_id marvell_nfc_of_ids[] = {
2874 {
2875 .compatible = "marvell,armada-8k-nand-controller",
2876 .data = &marvell_armada_8k_nfc_caps,
2877 },
2878 {
2879 .compatible = "marvell,armada370-nand-controller",
2880 .data = &marvell_armada370_nfc_caps,
2881 },
2882 {
2883 .compatible = "marvell,pxa3xx-nand-controller",
2884 .data = &marvell_pxa3xx_nfc_caps,
2885 },
2886 /* Support for old/deprecated bindings: */
2887 {
2888 .compatible = "marvell,armada-8k-nand",
2889 .data = &marvell_armada_8k_nfc_legacy_caps,
2890 },
2891 {
2892 .compatible = "marvell,armada370-nand",
2893 .data = &marvell_armada370_nfc_legacy_caps,
2894 },
2895 {
2896 .compatible = "marvell,pxa3xx-nand",
2897 .data = &marvell_pxa3xx_nfc_legacy_caps,
2898 },
2899 { /* sentinel */ },
2900};
2901MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids);
2902
2903static struct platform_driver marvell_nfc_driver = {
2904 .driver = {
2905 .name = "marvell-nfc",
2906 .of_match_table = marvell_nfc_of_ids,
2907 },
2908 .id_table = marvell_nfc_platform_ids,
2909 .probe = marvell_nfc_probe,
2910 .remove = marvell_nfc_remove,
2911};
2912module_platform_driver(marvell_nfc_driver);
2913
2914MODULE_LICENSE("GPL");
2915MODULE_DESCRIPTION("Marvell NAND controller driver");