blob: 6be555806ecaa87eac5d921bf1defa76bd607eca [file] [log] [blame]
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
13
14#include <linux/clk.h>
15#include <linux/slab.h>
16#include <linux/bitops.h>
17#include <linux/dma-mapping.h>
18#include <linux/dmaengine.h>
19#include <linux/module.h>
Boris Brezillond4092d72017-08-04 17:29:10 +020020#include <linux/mtd/rawnand.h>
Archit Tanejac76b78d2016-02-03 14:29:50 +053021#include <linux/mtd/partitions.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
Archit Tanejac76b78d2016-02-03 14:29:50 +053024#include <linux/delay.h>
Abhishek Sahu8c4cdce2017-09-25 13:21:25 +053025#include <linux/dma/qcom_bam_dma.h>
Archit Tanejac76b78d2016-02-03 14:29:50 +053026
27/* NANDc reg offsets */
28#define NAND_FLASH_CMD 0x00
29#define NAND_ADDR0 0x04
30#define NAND_ADDR1 0x08
31#define NAND_FLASH_CHIP_SELECT 0x0c
32#define NAND_EXEC_CMD 0x10
33#define NAND_FLASH_STATUS 0x14
34#define NAND_BUFFER_STATUS 0x18
35#define NAND_DEV0_CFG0 0x20
36#define NAND_DEV0_CFG1 0x24
37#define NAND_DEV0_ECC_CFG 0x28
38#define NAND_DEV1_ECC_CFG 0x2c
39#define NAND_DEV1_CFG0 0x30
40#define NAND_DEV1_CFG1 0x34
41#define NAND_READ_ID 0x40
42#define NAND_READ_STATUS 0x44
43#define NAND_DEV_CMD0 0xa0
44#define NAND_DEV_CMD1 0xa4
45#define NAND_DEV_CMD2 0xa8
46#define NAND_DEV_CMD_VLD 0xac
47#define SFLASHC_BURST_CFG 0xe0
48#define NAND_ERASED_CW_DETECT_CFG 0xe8
49#define NAND_ERASED_CW_DETECT_STATUS 0xec
50#define NAND_EBI2_ECC_BUF_CFG 0xf0
51#define FLASH_BUF_ACC 0x100
52
53#define NAND_CTRL 0xf00
54#define NAND_VERSION 0xf08
55#define NAND_READ_LOCATION_0 0xf20
56#define NAND_READ_LOCATION_1 0xf24
Abhishek Sahu91af95c2017-08-17 17:37:43 +053057#define NAND_READ_LOCATION_2 0xf28
58#define NAND_READ_LOCATION_3 0xf2c
Archit Tanejac76b78d2016-02-03 14:29:50 +053059
60/* dummy register offsets, used by write_reg_dma */
61#define NAND_DEV_CMD1_RESTORE 0xdead
62#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
63
64/* NAND_FLASH_CMD bits */
65#define PAGE_ACC BIT(4)
66#define LAST_PAGE BIT(5)
67
68/* NAND_FLASH_CHIP_SELECT bits */
69#define NAND_DEV_SEL 0
70#define DM_EN BIT(2)
71
72/* NAND_FLASH_STATUS bits */
73#define FS_OP_ERR BIT(4)
74#define FS_READY_BSY_N BIT(5)
75#define FS_MPU_ERR BIT(8)
76#define FS_DEVICE_STS_ERR BIT(16)
77#define FS_DEVICE_WP BIT(23)
78
79/* NAND_BUFFER_STATUS bits */
80#define BS_UNCORRECTABLE_BIT BIT(8)
81#define BS_CORRECTABLE_ERR_MSK 0x1f
82
83/* NAND_DEVn_CFG0 bits */
84#define DISABLE_STATUS_AFTER_WRITE 4
85#define CW_PER_PAGE 6
86#define UD_SIZE_BYTES 9
87#define ECC_PARITY_SIZE_BYTES_RS 19
88#define SPARE_SIZE_BYTES 23
89#define NUM_ADDR_CYCLES 27
90#define STATUS_BFR_READ 30
91#define SET_RD_MODE_AFTER_STATUS 31
92
/* NAND_DEVn_CFG1 bits */
94#define DEV0_CFG1_ECC_DISABLE 0
95#define WIDE_FLASH 1
96#define NAND_RECOVERY_CYCLES 2
97#define CS_ACTIVE_BSY 5
98#define BAD_BLOCK_BYTE_NUM 6
99#define BAD_BLOCK_IN_SPARE_AREA 16
100#define WR_RD_BSY_GAP 17
101#define ENABLE_BCH_ECC 27
102
103/* NAND_DEV0_ECC_CFG bits */
104#define ECC_CFG_ECC_DISABLE 0
105#define ECC_SW_RESET 1
106#define ECC_MODE 4
107#define ECC_PARITY_SIZE_BYTES_BCH 8
108#define ECC_NUM_DATA_BYTES 16
109#define ECC_FORCE_CLK_OPEN 30
110
111/* NAND_DEV_CMD1 bits */
112#define READ_ADDR 0
113
114/* NAND_DEV_CMD_VLD bits */
Abhishek Sahud8a9b322017-08-11 17:09:16 +0530115#define READ_START_VLD BIT(0)
116#define READ_STOP_VLD BIT(1)
117#define WRITE_START_VLD BIT(2)
118#define ERASE_START_VLD BIT(3)
119#define SEQ_READ_START_VLD BIT(4)
Archit Tanejac76b78d2016-02-03 14:29:50 +0530120
121/* NAND_EBI2_ECC_BUF_CFG bits */
122#define NUM_STEPS 0
123
124/* NAND_ERASED_CW_DETECT_CFG bits */
125#define ERASED_CW_ECC_MASK 1
126#define AUTO_DETECT_RES 0
127#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
128#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
129#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
130#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
131#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
132
133/* NAND_ERASED_CW_DETECT_STATUS bits */
134#define PAGE_ALL_ERASED BIT(7)
135#define CODEWORD_ALL_ERASED BIT(6)
136#define PAGE_ERASED BIT(5)
137#define CODEWORD_ERASED BIT(4)
138#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
139#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
140
Abhishek Sahu91af95c2017-08-17 17:37:43 +0530141/* NAND_READ_LOCATION_n bits */
142#define READ_LOCATION_OFFSET 0
143#define READ_LOCATION_SIZE 16
144#define READ_LOCATION_LAST 31
145
Archit Tanejac76b78d2016-02-03 14:29:50 +0530146/* Version Mask */
147#define NAND_VERSION_MAJOR_MASK 0xf0000000
148#define NAND_VERSION_MAJOR_SHIFT 28
149#define NAND_VERSION_MINOR_MASK 0x0fff0000
150#define NAND_VERSION_MINOR_SHIFT 16
151
152/* NAND OP_CMDs */
153#define PAGE_READ 0x2
154#define PAGE_READ_WITH_ECC 0x3
155#define PAGE_READ_WITH_ECC_SPARE 0x4
156#define PROGRAM_PAGE 0x6
157#define PAGE_PROGRAM_WITH_ECC 0x7
158#define PROGRAM_PAGE_SPARE 0x9
159#define BLOCK_ERASE 0xa
160#define FETCH_ID 0xb
161#define RESET_DEVICE 0xd
162
Abhishek Sahud8a9b322017-08-11 17:09:16 +0530163/* Default Value for NAND_DEV_CMD_VLD */
164#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
165 ERASE_START_VLD | SEQ_READ_START_VLD)
166
Abhishek Sahu9d43f912017-08-17 17:37:45 +0530167/* NAND_CTRL bits */
168#define BAM_MODE_EN BIT(0)
169
Archit Tanejac76b78d2016-02-03 14:29:50 +0530170/*
171 * the NAND controller performs reads/writes with ECC in 516 byte chunks.
172 * the driver calls the chunks 'step' or 'codeword' interchangeably
173 */
174#define NANDC_STEP_SIZE 512
175
176/*
177 * the largest page size we support is 8K, this will have 16 steps/codewords
178 * of 512 bytes each
179 */
180#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
181
182/* we read at most 3 registers per codeword scan */
183#define MAX_REG_RD (3 * MAX_NUM_STEPS)
184
185/* ECC modes supported by the controller */
186#define ECC_NONE BIT(0)
187#define ECC_RS_4BIT BIT(1)
188#define ECC_BCH_4BIT BIT(2)
189#define ECC_BCH_8BIT BIT(3)
190
Abhishek Sahu91af95c2017-08-17 17:37:43 +0530191#define nandc_set_read_loc(nandc, reg, offset, size, is_last) \
192nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
193 ((offset) << READ_LOCATION_OFFSET) | \
194 ((size) << READ_LOCATION_SIZE) | \
195 ((is_last) << READ_LOCATION_LAST))
196
Abhishek Sahucc409b92017-08-17 17:37:47 +0530197/*
198 * Returns the actual register address for all NAND_DEV_ registers
199 * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
200 */
201#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
202
Abhishek Sahu8d6b6d72017-09-25 13:21:26 +0530203/* Returns the NAND register physical address */
204#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
205
206/* Returns the dma address for reg read buffer */
207#define reg_buf_dma_addr(chip, vaddr) \
208 ((chip)->reg_read_dma + \
209 ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
210
Abhishek Sahu8c4cdce2017-09-25 13:21:25 +0530211#define QPIC_PER_CW_CMD_ELEMENTS 32
Abhishek Sahucb80f112017-08-17 17:37:40 +0530212#define QPIC_PER_CW_CMD_SGL 32
213#define QPIC_PER_CW_DATA_SGL 8
214
215/*
Abhishek Sahu67e830a2017-08-17 17:37:42 +0530216 * Flags used in DMA descriptor preparation helper functions
217 * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
218 */
219/* Don't set the EOT in current tx BAM sgl */
220#define NAND_BAM_NO_EOT BIT(0)
221/* Set the NWD flag in current BAM sgl */
222#define NAND_BAM_NWD BIT(1)
223/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
224#define NAND_BAM_NEXT_SGL BIT(2)
Abhishek Sahua86b9c42017-08-17 17:37:44 +0530225/*
226 * Erased codeword status is being used two times in single transfer so this
227 * flag will determine the current value of erased codeword status register
228 */
229#define NAND_ERASED_CW_SET BIT(4)
Abhishek Sahu67e830a2017-08-17 17:37:42 +0530230
231/*
Abhishek Sahucb80f112017-08-17 17:37:40 +0530232 * This data type corresponds to the BAM transaction which will be used for all
233 * NAND transfers.
Abhishek Sahu8c4cdce2017-09-25 13:21:25 +0530234 * @bam_ce - the array of BAM command elements
Abhishek Sahucb80f112017-08-17 17:37:40 +0530235 * @cmd_sgl - sgl for NAND BAM command pipe
236 * @data_sgl - sgl for NAND BAM consumer/producer pipe
Abhishek Sahu8c4cdce2017-09-25 13:21:25 +0530237 * @bam_ce_pos - the index in bam_ce which is available for next sgl
238 * @bam_ce_start - the index in bam_ce which marks the start position ce
239 * for current sgl. It will be used for size calculation
240 * for current sgl
Abhishek Sahucb80f112017-08-17 17:37:40 +0530241 * @cmd_sgl_pos - current index in command sgl.
242 * @cmd_sgl_start - start index in command sgl.
243 * @tx_sgl_pos - current index in data sgl for tx.
244 * @tx_sgl_start - start index in data sgl for tx.
245 * @rx_sgl_pos - current index in data sgl for rx.
246 * @rx_sgl_start - start index in data sgl for rx.
247 */
248struct bam_transaction {
Abhishek Sahu8c4cdce2017-09-25 13:21:25 +0530249 struct bam_cmd_element *bam_ce;
Abhishek Sahucb80f112017-08-17 17:37:40 +0530250 struct scatterlist *cmd_sgl;
251 struct scatterlist *data_sgl;
Abhishek Sahu8c4cdce2017-09-25 13:21:25 +0530252 u32 bam_ce_pos;
253 u32 bam_ce_start;
Abhishek Sahucb80f112017-08-17 17:37:40 +0530254 u32 cmd_sgl_pos;
255 u32 cmd_sgl_start;
256 u32 tx_sgl_pos;
257 u32 tx_sgl_start;
258 u32 rx_sgl_pos;
259 u32 rx_sgl_start;
260};
261
Abhishek Sahu381dd242017-08-17 17:37:41 +0530262/*
263 * This data type corresponds to the nand dma descriptor
264 * @list - list for desc_info
265 * @dir - DMA transfer direction
266 * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
267 * ADM
268 * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
269 * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
270 * @dma_desc - low level DMA engine descriptor
271 */
Archit Tanejac76b78d2016-02-03 14:29:50 +0530272struct desc_info {
273 struct list_head node;
274
275 enum dma_data_direction dir;
Abhishek Sahu381dd242017-08-17 17:37:41 +0530276 union {
277 struct scatterlist adm_sgl;
278 struct {
279 struct scatterlist *bam_sgl;
280 int sgl_cnt;
281 };
282 };
Archit Tanejac76b78d2016-02-03 14:29:50 +0530283 struct dma_async_tx_descriptor *dma_desc;
284};
285
286/*
287 * holds the current register values that we want to write. acts as a contiguous
288 * chunk of memory which we use to write the controller registers through DMA.
289 */
290struct nandc_regs {
291 __le32 cmd;
292 __le32 addr0;
293 __le32 addr1;
294 __le32 chip_sel;
295 __le32 exec;
296
297 __le32 cfg0;
298 __le32 cfg1;
299 __le32 ecc_bch_cfg;
300
301 __le32 clrflashstatus;
302 __le32 clrreadstatus;
303
304 __le32 cmd1;
305 __le32 vld;
306
307 __le32 orig_cmd1;
308 __le32 orig_vld;
309
310 __le32 ecc_buf_cfg;
Abhishek Sahu91af95c2017-08-17 17:37:43 +0530311 __le32 read_location0;
312 __le32 read_location1;
313 __le32 read_location2;
314 __le32 read_location3;
315
Abhishek Sahua86b9c42017-08-17 17:37:44 +0530316 __le32 erased_cw_detect_cfg_clr;
317 __le32 erased_cw_detect_cfg_set;
Archit Tanejac76b78d2016-02-03 14:29:50 +0530318};
319
320/*
321 * NAND controller data struct
322 *
323 * @controller: base controller structure
324 * @host_list: list containing all the chips attached to the
325 * controller
326 * @dev: parent device
327 * @base: MMIO base
Abhishek Sahu8d6b6d72017-09-25 13:21:26 +0530328 * @base_phys: physical base address of controller registers
329 * @base_dma: dma base address of controller registers
Archit Tanejac76b78d2016-02-03 14:29:50 +0530330 * @core_clk: controller clock
331 * @aon_clk: another controller clock
332 *
333 * @chan: dma channel
334 * @cmd_crci: ADM DMA CRCI for command flow control
335 * @data_crci: ADM DMA CRCI for data flow control
336 * @desc_list: DMA descriptor list (list of desc_infos)
337 *
338 * @data_buffer: our local DMA buffer for page read/writes,
339 * used when we can't use the buffer provided
340 * by upper layers directly
341 * @buf_size/count/start: markers for chip->read_buf/write_buf functions
342 * @reg_read_buf: local buffer for reading back registers via DMA
Abhishek Sahu6192ff72017-08-17 17:37:39 +0530343 * @reg_read_dma: contains dma address for register read buffer
Archit Tanejac76b78d2016-02-03 14:29:50 +0530344 * @reg_read_pos: marker for data read in reg_read_buf
345 *
346 * @regs: a contiguous chunk of memory for DMA register
347 * writes. contains the register values to be
348 * written to controller
349 * @cmd1/vld: some fixed controller register values
Abhishek Sahu58f1f222017-08-11 17:09:17 +0530350 * @props: properties of current NAND controller,
Archit Tanejac76b78d2016-02-03 14:29:50 +0530351 * initialized via DT match data
Abhishek Sahucb80f112017-08-17 17:37:40 +0530352 * @max_cwperpage: maximum QPIC codewords required. calculated
353 * from all connected NAND devices pagesize
Archit Tanejac76b78d2016-02-03 14:29:50 +0530354 */
355struct qcom_nand_controller {
356 struct nand_hw_control controller;
357 struct list_head host_list;
358
359 struct device *dev;
360
361 void __iomem *base;
Abhishek Sahu8d6b6d72017-09-25 13:21:26 +0530362 phys_addr_t base_phys;
Archit Tanejac76b78d2016-02-03 14:29:50 +0530363 dma_addr_t base_dma;
364
365 struct clk *core_clk;
366 struct clk *aon_clk;
367
Abhishek Sahu497d7d82017-08-11 17:09:19 +0530368 union {
369 /* will be used only by QPIC for BAM DMA */
370 struct {
371 struct dma_chan *tx_chan;
372 struct dma_chan *rx_chan;
373 struct dma_chan *cmd_chan;
374 };
375
376 /* will be used only by EBI2 for ADM DMA */
377 struct {
378 struct dma_chan *chan;
379 unsigned int cmd_crci;
380 unsigned int data_crci;
381 };
382 };
383
Archit Tanejac76b78d2016-02-03 14:29:50 +0530384 struct list_head desc_list;
Abhishek Sahucb80f112017-08-17 17:37:40 +0530385 struct bam_transaction *bam_txn;
Archit Tanejac76b78d2016-02-03 14:29:50 +0530386
387 u8 *data_buffer;
388 int buf_size;
389 int buf_count;
390 int buf_start;
Abhishek Sahucb80f112017-08-17 17:37:40 +0530391 unsigned int max_cwperpage;
Archit Tanejac76b78d2016-02-03 14:29:50 +0530392
393 __le32 *reg_read_buf;
Abhishek Sahu6192ff72017-08-17 17:37:39 +0530394 dma_addr_t reg_read_dma;
Archit Tanejac76b78d2016-02-03 14:29:50 +0530395 int reg_read_pos;
396
397 struct nandc_regs *regs;
398
399 u32 cmd1, vld;
Abhishek Sahu58f1f222017-08-11 17:09:17 +0530400 const struct qcom_nandc_props *props;
Archit Tanejac76b78d2016-02-03 14:29:50 +0530401};
402
403/*
404 * NAND chip structure
405 *
406 * @chip: base NAND chip structure
407 * @node: list node to add itself to host_list in
408 * qcom_nand_controller
409 *
410 * @cs: chip select value for this chip
411 * @cw_size: the number of bytes in a single step/codeword
412 * of a page, consisting of all data, ecc, spare
413 * and reserved bytes
414 * @cw_data: the number of bytes within a codeword protected
415 * by ECC
416 * @use_ecc: request the controller to use ECC for the
417 * upcoming read/write
418 * @bch_enabled: flag to tell whether BCH ECC mode is used
419 * @ecc_bytes_hw: ECC bytes used by controller hardware for this
420 * chip
421 * @status: value to be returned if NAND_CMD_STATUS command
422 * is executed
423 * @last_command: keeps track of last command on this chip. used
424 * for reading correct status
425 *
426 * @cfg0, cfg1, cfg0_raw..: NANDc register configurations needed for
427 * ecc/non-ecc mode for the current nand flash
428 * device
429 */
430struct qcom_nand_host {
431 struct nand_chip chip;
432 struct list_head node;
433
434 int cs;
435 int cw_size;
436 int cw_data;
437 bool use_ecc;
438 bool bch_enabled;
439 int ecc_bytes_hw;
440 int spare_bytes;
441 int bbm_size;
442 u8 status;
443 int last_command;
444
445 u32 cfg0, cfg1;
446 u32 cfg0_raw, cfg1_raw;
447 u32 ecc_buf_cfg;
448 u32 ecc_bch_cfg;
449 u32 clrflashstatus;
450 u32 clrreadstatus;
451};
452
Abhishek Sahu58f1f222017-08-11 17:09:17 +0530453/*
454 * This data type corresponds to the NAND controller properties which varies
455 * among different NAND controllers.
456 * @ecc_modes - ecc mode for NAND
Abhishek Sahu8c5d5d62017-08-11 17:09:18 +0530457 * @is_bam - whether NAND controller is using BAM
Abhishek Sahucc409b92017-08-17 17:37:47 +0530458 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
Abhishek Sahu58f1f222017-08-11 17:09:17 +0530459 */
460struct qcom_nandc_props {
461 u32 ecc_modes;
Abhishek Sahu8c5d5d62017-08-11 17:09:18 +0530462 bool is_bam;
Abhishek Sahucc409b92017-08-17 17:37:47 +0530463 u32 dev_cmd_reg_start;
Abhishek Sahu58f1f222017-08-11 17:09:17 +0530464};
465
Abhishek Sahucb80f112017-08-17 17:37:40 +0530466/* Frees the BAM transaction memory */
467static void free_bam_transaction(struct qcom_nand_controller *nandc)
468{
469 struct bam_transaction *bam_txn = nandc->bam_txn;
470
471 devm_kfree(nandc->dev, bam_txn);
472}
473
474/* Allocates and Initializes the BAM transaction */
475static struct bam_transaction *
476alloc_bam_transaction(struct qcom_nand_controller *nandc)
477{
478 struct bam_transaction *bam_txn;
479 size_t bam_txn_size;
480 unsigned int num_cw = nandc->max_cwperpage;
481 void *bam_txn_buf;
482
483 bam_txn_size =
484 sizeof(*bam_txn) + num_cw *
Abhishek Sahu8c4cdce2017-09-25 13:21:25 +0530485 ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
486 (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
Abhishek Sahucb80f112017-08-17 17:37:40 +0530487 (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
488
489 bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
490 if (!bam_txn_buf)
491 return NULL;
492
493 bam_txn = bam_txn_buf;
494 bam_txn_buf += sizeof(*bam_txn);
495
Abhishek Sahu8c4cdce2017-09-25 13:21:25 +0530496 bam_txn->bam_ce = bam_txn_buf;
497 bam_txn_buf +=
498 sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
499
Abhishek Sahucb80f112017-08-17 17:37:40 +0530500 bam_txn->cmd_sgl = bam_txn_buf;
501 bam_txn_buf +=
502 sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
503
504 bam_txn->data_sgl = bam_txn_buf;
505
506 return bam_txn;
507}
508
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +0530509/* Clears the BAM transaction indexes */
510static void clear_bam_transaction(struct qcom_nand_controller *nandc)
511{
512 struct bam_transaction *bam_txn = nandc->bam_txn;
513
514 if (!nandc->props->is_bam)
515 return;
516
Abhishek Sahu8c4cdce2017-09-25 13:21:25 +0530517 bam_txn->bam_ce_pos = 0;
518 bam_txn->bam_ce_start = 0;
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +0530519 bam_txn->cmd_sgl_pos = 0;
520 bam_txn->cmd_sgl_start = 0;
521 bam_txn->tx_sgl_pos = 0;
522 bam_txn->tx_sgl_start = 0;
523 bam_txn->rx_sgl_pos = 0;
524 bam_txn->rx_sgl_start = 0;
525
526 sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
527 QPIC_PER_CW_CMD_SGL);
528 sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
529 QPIC_PER_CW_DATA_SGL);
530}
531
Archit Tanejac76b78d2016-02-03 14:29:50 +0530532static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
533{
534 return container_of(chip, struct qcom_nand_host, chip);
535}
536
537static inline struct qcom_nand_controller *
538get_qcom_nand_controller(struct nand_chip *chip)
539{
540 return container_of(chip->controller, struct qcom_nand_controller,
541 controller);
542}
543
544static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
545{
546 return ioread32(nandc->base + offset);
547}
548
549static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
550 u32 val)
551{
552 iowrite32(val, nandc->base + offset);
553}
554
Abhishek Sahu6192ff72017-08-17 17:37:39 +0530555static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
556 bool is_cpu)
557{
558 if (!nandc->props->is_bam)
559 return;
560
561 if (is_cpu)
562 dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
563 MAX_REG_RD *
564 sizeof(*nandc->reg_read_buf),
565 DMA_FROM_DEVICE);
566 else
567 dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
568 MAX_REG_RD *
569 sizeof(*nandc->reg_read_buf),
570 DMA_FROM_DEVICE);
571}
572
Archit Tanejac76b78d2016-02-03 14:29:50 +0530573static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
574{
575 switch (offset) {
576 case NAND_FLASH_CMD:
577 return &regs->cmd;
578 case NAND_ADDR0:
579 return &regs->addr0;
580 case NAND_ADDR1:
581 return &regs->addr1;
582 case NAND_FLASH_CHIP_SELECT:
583 return &regs->chip_sel;
584 case NAND_EXEC_CMD:
585 return &regs->exec;
586 case NAND_FLASH_STATUS:
587 return &regs->clrflashstatus;
588 case NAND_DEV0_CFG0:
589 return &regs->cfg0;
590 case NAND_DEV0_CFG1:
591 return &regs->cfg1;
592 case NAND_DEV0_ECC_CFG:
593 return &regs->ecc_bch_cfg;
594 case NAND_READ_STATUS:
595 return &regs->clrreadstatus;
596 case NAND_DEV_CMD1:
597 return &regs->cmd1;
598 case NAND_DEV_CMD1_RESTORE:
599 return &regs->orig_cmd1;
600 case NAND_DEV_CMD_VLD:
601 return &regs->vld;
602 case NAND_DEV_CMD_VLD_RESTORE:
603 return &regs->orig_vld;
604 case NAND_EBI2_ECC_BUF_CFG:
605 return &regs->ecc_buf_cfg;
Abhishek Sahu91af95c2017-08-17 17:37:43 +0530606 case NAND_READ_LOCATION_0:
607 return &regs->read_location0;
608 case NAND_READ_LOCATION_1:
609 return &regs->read_location1;
610 case NAND_READ_LOCATION_2:
611 return &regs->read_location2;
612 case NAND_READ_LOCATION_3:
613 return &regs->read_location3;
Archit Tanejac76b78d2016-02-03 14:29:50 +0530614 default:
615 return NULL;
616 }
617}
618
619static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
620 u32 val)
621{
622 struct nandc_regs *regs = nandc->regs;
623 __le32 *reg;
624
625 reg = offset_to_nandc_reg(regs, offset);
626
627 if (reg)
628 *reg = cpu_to_le32(val);
629}
630
631/* helper to configure address register values */
632static void set_address(struct qcom_nand_host *host, u16 column, int page)
633{
634 struct nand_chip *chip = &host->chip;
635 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
636
637 if (chip->options & NAND_BUSWIDTH_16)
638 column >>= 1;
639
640 nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
641 nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
642}
643
644/*
645 * update_rw_regs: set up read/write register values, these will be
646 * written to the NAND controller registers via DMA
647 *
648 * @num_cw: number of steps for the read/write operation
649 * @read: read or write operation
650 */
651static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
652{
653 struct nand_chip *chip = &host->chip;
654 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
655 u32 cmd, cfg0, cfg1, ecc_bch_cfg;
656
657 if (read) {
658 if (host->use_ecc)
659 cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
660 else
661 cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
662 } else {
663 cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
664 }
665
666 if (host->use_ecc) {
667 cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
668 (num_cw - 1) << CW_PER_PAGE;
669
670 cfg1 = host->cfg1;
671 ecc_bch_cfg = host->ecc_bch_cfg;
672 } else {
673 cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
674 (num_cw - 1) << CW_PER_PAGE;
675
676 cfg1 = host->cfg1_raw;
677 ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
678 }
679
680 nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
681 nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
682 nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
683 nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
684 nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
685 nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
686 nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
687 nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
Abhishek Sahu91af95c2017-08-17 17:37:43 +0530688
689 if (read)
690 nandc_set_read_loc(nandc, 0, 0, host->use_ecc ?
691 host->cw_data : host->cw_size, 1);
Archit Tanejac76b78d2016-02-03 14:29:50 +0530692}
693
Abhishek Sahu381dd242017-08-17 17:37:41 +0530694/*
695 * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
696 * for BAM. This descriptor will be added in the NAND DMA descriptor queue
697 * which will be submitted to DMA engine.
698 */
699static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
700 struct dma_chan *chan,
701 unsigned long flags)
702{
703 struct desc_info *desc;
704 struct scatterlist *sgl;
705 unsigned int sgl_cnt;
706 int ret;
707 struct bam_transaction *bam_txn = nandc->bam_txn;
708 enum dma_transfer_direction dir_eng;
709 struct dma_async_tx_descriptor *dma_desc;
710
711 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
712 if (!desc)
713 return -ENOMEM;
714
715 if (chan == nandc->cmd_chan) {
716 sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
717 sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
718 bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
719 dir_eng = DMA_MEM_TO_DEV;
720 desc->dir = DMA_TO_DEVICE;
721 } else if (chan == nandc->tx_chan) {
722 sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
723 sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
724 bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
725 dir_eng = DMA_MEM_TO_DEV;
726 desc->dir = DMA_TO_DEVICE;
727 } else {
728 sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
729 sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
730 bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
731 dir_eng = DMA_DEV_TO_MEM;
732 desc->dir = DMA_FROM_DEVICE;
733 }
734
735 sg_mark_end(sgl + sgl_cnt - 1);
736 ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
737 if (ret == 0) {
738 dev_err(nandc->dev, "failure in mapping desc\n");
739 kfree(desc);
740 return -ENOMEM;
741 }
742
743 desc->sgl_cnt = sgl_cnt;
744 desc->bam_sgl = sgl;
745
746 dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
747 flags);
748
749 if (!dma_desc) {
750 dev_err(nandc->dev, "failure in prep desc\n");
751 dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
752 kfree(desc);
753 return -EINVAL;
754 }
755
756 desc->dma_desc = dma_desc;
757
758 list_add_tail(&desc->node, &nandc->desc_list);
759
760 return 0;
761}
762
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +0530763/*
Abhishek Sahu8d6b6d72017-09-25 13:21:26 +0530764 * Prepares the command descriptor for BAM DMA which will be used for NAND
765 * register reads and writes. The command descriptor requires the command
766 * to be formed in command element type so this function uses the command
767 * element from bam transaction ce array and fills the same with required
768 * data. A single SGL can contain multiple command elements so
769 * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
770 * after the current command element.
771 */
772static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
773 int reg_off, const void *vaddr,
774 int size, unsigned int flags)
775{
776 int bam_ce_size;
777 int i, ret;
778 struct bam_cmd_element *bam_ce_buffer;
779 struct bam_transaction *bam_txn = nandc->bam_txn;
780
781 bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
782
783 /* fill the command desc */
784 for (i = 0; i < size; i++) {
785 if (read)
786 bam_prep_ce(&bam_ce_buffer[i],
787 nandc_reg_phys(nandc, reg_off + 4 * i),
788 BAM_READ_COMMAND,
789 reg_buf_dma_addr(nandc,
790 (__le32 *)vaddr + i));
791 else
792 bam_prep_ce_le32(&bam_ce_buffer[i],
793 nandc_reg_phys(nandc, reg_off + 4 * i),
794 BAM_WRITE_COMMAND,
795 *((__le32 *)vaddr + i));
796 }
797
798 bam_txn->bam_ce_pos += size;
799
800 /* use the separate sgl after this command */
801 if (flags & NAND_BAM_NEXT_SGL) {
802 bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
803 bam_ce_size = (bam_txn->bam_ce_pos -
804 bam_txn->bam_ce_start) *
805 sizeof(struct bam_cmd_element);
806 sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
807 bam_ce_buffer, bam_ce_size);
808 bam_txn->cmd_sgl_pos++;
809 bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
810
811 if (flags & NAND_BAM_NWD) {
812 ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
813 DMA_PREP_FENCE |
814 DMA_PREP_CMD);
815 if (ret)
816 return ret;
817 }
818 }
819
820 return 0;
821}
822
823/*
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +0530824 * Prepares the data descriptor for BAM DMA which will be used for NAND
825 * data reads and writes.
826 */
827static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
828 const void *vaddr,
829 int size, unsigned int flags)
830{
831 int ret;
832 struct bam_transaction *bam_txn = nandc->bam_txn;
833
834 if (read) {
835 sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
836 vaddr, size);
837 bam_txn->rx_sgl_pos++;
838 } else {
839 sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
840 vaddr, size);
841 bam_txn->tx_sgl_pos++;
842
843 /*
844 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
845 * is not set, form the DMA descriptor
846 */
847 if (!(flags & NAND_BAM_NO_EOT)) {
848 ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
849 DMA_PREP_INTERRUPT);
850 if (ret)
851 return ret;
852 }
853 }
854
855 return 0;
856}
857
Abhishek Sahu381dd242017-08-17 17:37:41 +0530858static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
859 int reg_off, const void *vaddr, int size,
860 bool flow_control)
Archit Tanejac76b78d2016-02-03 14:29:50 +0530861{
862 struct desc_info *desc;
863 struct dma_async_tx_descriptor *dma_desc;
864 struct scatterlist *sgl;
865 struct dma_slave_config slave_conf;
866 enum dma_transfer_direction dir_eng;
867 int ret;
868
869 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
870 if (!desc)
871 return -ENOMEM;
872
Abhishek Sahu381dd242017-08-17 17:37:41 +0530873 sgl = &desc->adm_sgl;
Archit Tanejac76b78d2016-02-03 14:29:50 +0530874
875 sg_init_one(sgl, vaddr, size);
876
877 if (read) {
878 dir_eng = DMA_DEV_TO_MEM;
879 desc->dir = DMA_FROM_DEVICE;
880 } else {
881 dir_eng = DMA_MEM_TO_DEV;
882 desc->dir = DMA_TO_DEVICE;
883 }
884
885 ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
886 if (ret == 0) {
887 ret = -ENOMEM;
888 goto err;
889 }
890
891 memset(&slave_conf, 0x00, sizeof(slave_conf));
892
893 slave_conf.device_fc = flow_control;
894 if (read) {
895 slave_conf.src_maxburst = 16;
896 slave_conf.src_addr = nandc->base_dma + reg_off;
897 slave_conf.slave_id = nandc->data_crci;
898 } else {
899 slave_conf.dst_maxburst = 16;
900 slave_conf.dst_addr = nandc->base_dma + reg_off;
901 slave_conf.slave_id = nandc->cmd_crci;
902 }
903
904 ret = dmaengine_slave_config(nandc->chan, &slave_conf);
905 if (ret) {
906 dev_err(nandc->dev, "failed to configure dma channel\n");
907 goto err;
908 }
909
910 dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
911 if (!dma_desc) {
912 dev_err(nandc->dev, "failed to prepare desc\n");
913 ret = -EINVAL;
914 goto err;
915 }
916
917 desc->dma_desc = dma_desc;
918
919 list_add_tail(&desc->node, &nandc->desc_list);
920
921 return 0;
922err:
923 kfree(desc);
924
925 return ret;
926}
927
/*
 * read_reg_dma:	prepares a descriptor to read a given number of
 *			contiguous registers to the reg_read_buf pointer
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to read
 * @flags:		flags to control DMA descriptor preparation
 */
static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
			int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	/* land the register values in the next free slot of reg_read_buf */
	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	/*
	 * these two offsets go through dev_cmd_reg_addr() — presumably a
	 * per-variant register base remap; must happen before the BAM/ADM
	 * split below so both paths see the translated offset
	 */
	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
					     num_regs, flags);

	/* ADM path only: these register reads need flow control enabled */
	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return prep_adm_dma_desc(nandc, true, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
958
/*
 * write_reg_dma:	prepares a descriptor to write a given number of
 *			contiguous registers
 *
 * @first:		offset of the first register in the contiguous block
 * @num_regs:		number of registers to write
 * @flags:		flags to control DMA descriptor preparation
 */
static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
			 int num_regs, unsigned int flags)
{
	bool flow_control = false;
	struct nandc_regs *regs = nandc->regs;
	void *vaddr;

	/* source values come from the driver's shadow register block */
	vaddr = offset_to_nandc_reg(regs, first);

	/*
	 * NAND_ERASED_CW_DETECT_CFG has two shadow copies; NAND_ERASED_CW_SET
	 * selects the "set" value, otherwise the "clear" value is written
	 */
	if (first == NAND_ERASED_CW_DETECT_CFG) {
		if (flags & NAND_ERASED_CW_SET)
			vaddr = &regs->erased_cw_detect_cfg_set;
		else
			vaddr = &regs->erased_cw_detect_cfg_clr;
	}

	/* EXEC_CMD writes always get NAND_BAM_NWD added to the flags */
	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	/* the *_RESTORE pseudo-offsets map back to the real CMD1/VLD regs */
	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->is_bam)
		return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
					     num_regs, flags);

	/* ADM path only: NAND_FLASH_CMD writes need flow control enabled */
	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return prep_adm_dma_desc(nandc, false, first, vaddr,
				 num_regs * sizeof(u32), flow_control);
}
1002
1003/*
1004 * read_data_dma: prepares a DMA descriptor to transfer data from the
1005 * controller's internal buffer to the buffer 'vaddr'
1006 *
1007 * @reg_off: offset within the controller's data buffer
1008 * @vaddr: virtual address of the buffer we want to write to
1009 * @size: DMA transaction size in bytes
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301010 * @flags: flags to control DMA descriptor preparation
Archit Tanejac76b78d2016-02-03 14:29:50 +05301011 */
1012static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301013 const u8 *vaddr, int size, unsigned int flags)
Archit Tanejac76b78d2016-02-03 14:29:50 +05301014{
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301015 if (nandc->props->is_bam)
1016 return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
1017
Abhishek Sahu381dd242017-08-17 17:37:41 +05301018 return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301019}
1020
1021/*
1022 * write_data_dma: prepares a DMA descriptor to transfer data from
1023 * 'vaddr' to the controller's internal buffer
1024 *
1025 * @reg_off: offset within the controller's data buffer
1026 * @vaddr: virtual address of the buffer we want to read from
1027 * @size: DMA transaction size in bytes
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301028 * @flags: flags to control DMA descriptor preparation
Archit Tanejac76b78d2016-02-03 14:29:50 +05301029 */
1030static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301031 const u8 *vaddr, int size, unsigned int flags)
Archit Tanejac76b78d2016-02-03 14:29:50 +05301032{
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301033 if (nandc->props->is_bam)
1034 return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
1035
Abhishek Sahu381dd242017-08-17 17:37:41 +05301036 return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301037}
1038
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading a NAND page.
 */
static void config_nand_page_read(struct qcom_nand_controller *nandc)
{
	/* program the page address and device configuration shadow regs */
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
	/*
	 * write the "clear" then the "set" shadow value of the erased-CW
	 * detect config (flag selection happens inside write_reg_dma())
	 */
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
	write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
		      NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
Archit Tanejac76b78d2016-02-03 14:29:50 +05301052
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before reading each codeword in NAND page.
 */
static void config_nand_cw_read(struct qcom_nand_controller *nandc)
{
	/* BAM variants additionally program the 4 READ_LOCATION registers */
	if (nandc->props->is_bam)
		write_reg_dma(nandc, NAND_READ_LOCATION_0, 4,
			      NAND_BAM_NEXT_SGL);

	/* issue the command and kick off execution */
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* queue reads of the per-codeword status registers */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
		     NAND_BAM_NEXT_SGL);
}
1070
/*
 * Helper to prepare dma descriptors to configure registers needed for reading a
 * single codeword in page
 */
static void config_nand_single_cw_page_read(struct qcom_nand_controller *nandc)
{
	/* full page setup followed by one codeword's worth of descriptors */
	config_nand_page_read(nandc);
	config_nand_cw_read(nandc);
}
1080
/*
 * Helper to prepare DMA descriptors used to configure registers needed for
 * before writing a NAND page.
 */
static void config_nand_page_write(struct qcom_nand_controller *nandc)
{
	/* program the page address and device configuration shadow regs */
	write_reg_dma(nandc, NAND_ADDR0, 2, 0);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
		      NAND_BAM_NEXT_SGL);
}
1092
/*
 * Helper to prepare DMA descriptors for configuring registers
 * before writing each codeword in NAND page.
 */
static void config_nand_cw_write(struct qcom_nand_controller *nandc)
{
	/* issue the command and kick off execution */
	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* read back the operation status for this codeword */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	/* clear the status registers with the shadow "clear" values */
	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
1107
1108/*
1109 * the following functions are used within chip->cmdfunc() to perform different
1110 * NAND_CMD_* commands
1111 */
1112
/* sets up descriptors for NAND_CMD_PARAM */
static int nandc_param(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/*
	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
	 * in use. we configure the controller to perform a raw read of 512
	 * bytes to read onfi params
	 */
	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, 0);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* single 512-byte codeword, ECC disabled for a raw transfer */
	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
					| 512 << UD_SIZE_BYTES
					| 5 << NUM_ADDR_CYCLES
					| 0 << SPARE_SIZE_BYTES);
	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
					| 0 << CS_ACTIVE_BSY
					| 17 << BAD_BLOCK_BYTE_NUM
					| 1 << BAD_BLOCK_IN_SPARE_AREA
					| 2 << WR_RD_BSY_GAP
					| 0 << WIDE_FLASH
					| 1 << DEV0_CFG1_ECC_DISABLE);
	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);

	/* configure CMD1 and VLD for ONFI param probing */
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
		      (nandc->vld & ~READ_START_VLD));
	nandc_set_reg(nandc, NAND_DEV_CMD1,
		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
		      | NAND_CMD_PARAM << READ_ADDR);

	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	/* stash the original CMD1/VLD values so they can be restored below */
	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
	/* BAM read location: the whole 512-byte param page in one go */
	nandc_set_read_loc(nandc, 0, 0, 512, 1);

	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);

	nandc->buf_count = 512;
	memset(nandc->data_buffer, 0xff, nandc->buf_count);

	config_nand_single_cw_page_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
		      nandc->buf_count, 0);

	/* restore CMD1 and VLD regs */
	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1170
/* sets up descriptors for NAND_CMD_ERASE1 */
static int erase_block(struct qcom_nand_host *host, int page_addr)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD,
		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* erase uses the raw config with the codewords-per-page field zeroed */
	nandc_set_reg(nandc, NAND_DEV0_CFG0,
		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
	/* shadow values used to clear the status regs after the erase */
	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);

	write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* read back the result, then clear the status registers */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
	write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1199
/* sets up descriptors for NAND_CMD_READID */
static int read_id(struct qcom_nand_host *host, int column)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	/* nothing to do for the address-less form of READID */
	if (column == -1)
		return 0;

	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
	nandc_set_reg(nandc, NAND_ADDR0, column);
	nandc_set_reg(nandc, NAND_ADDR1, 0);
	/* DM_EN is only set on the legacy (non-BAM) controllers */
	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
		      nandc->props->is_bam ? 0 : DM_EN);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* the ID ends up in NAND_READ_ID; post_command() copies it out */
	read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1223
/* sets up descriptors for NAND_CMD_RESET */
static int reset(struct qcom_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);

	write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
	write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

	/* read back status so callers can check the reset outcome */
	read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	return 0;
}
1240
/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int r;

	if (nandc->props->is_bam) {
		/*
		 * wrap any scatterlist entries queued since the last async
		 * descriptor into final descriptors, one per channel
		 */
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (r)
				return r;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->tx_chan,
						   DMA_PREP_INTERRUPT);
			if (r)
				return r;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
						   DMA_PREP_CMD);
			if (r)
				return r;
		}
	}

	/* submit everything; only the last cookie is waited on below */
	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->is_bam) {
		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);

		/* busy-wait for completion on the command channel */
		if (dma_sync_wait(nandc->cmd_chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			return -ETIMEDOUT;
	}

	return 0;
}
1287
/* unmap and free every descriptor queued on nandc->desc_list */
static void free_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;

	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		/* BAM descs carry an sgl array; ADM descs a single entry */
		if (nandc->props->is_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}
}
1305
/* reset the register read buffer for next NAND operation */
static void clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	/* sync the (now empty) buffer for device access */
	nandc_read_buffer_sync(nandc, false);
}
1312
1313static void pre_command(struct qcom_nand_host *host, int command)
1314{
1315 struct nand_chip *chip = &host->chip;
1316 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1317
1318 nandc->buf_count = 0;
1319 nandc->buf_start = 0;
1320 host->use_ecc = false;
1321 host->last_command = command;
1322
1323 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301324
1325 if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
1326 command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
1327 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301328}
1329
1330/*
1331 * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
1332 * privately maintained status byte, this status byte can be read after
1333 * NAND_CMD_STATUS is called
1334 */
1335static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
1336{
1337 struct nand_chip *chip = &host->chip;
1338 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1339 struct nand_ecc_ctrl *ecc = &chip->ecc;
1340 int num_cw;
1341 int i;
1342
1343 num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
Abhishek Sahu6192ff72017-08-17 17:37:39 +05301344 nandc_read_buffer_sync(nandc, true);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301345
1346 for (i = 0; i < num_cw; i++) {
1347 u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
1348
1349 if (flash_status & FS_MPU_ERR)
1350 host->status &= ~NAND_STATUS_WP;
1351
1352 if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
1353 (flash_status &
1354 FS_DEVICE_STS_ERR)))
1355 host->status |= NAND_STATUS_FAIL;
1356 }
1357}
1358
1359static void post_command(struct qcom_nand_host *host, int command)
1360{
1361 struct nand_chip *chip = &host->chip;
1362 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1363
1364 switch (command) {
1365 case NAND_CMD_READID:
Abhishek Sahu6192ff72017-08-17 17:37:39 +05301366 nandc_read_buffer_sync(nandc, true);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301367 memcpy(nandc->data_buffer, nandc->reg_read_buf,
1368 nandc->buf_count);
1369 break;
1370 case NAND_CMD_PAGEPROG:
1371 case NAND_CMD_ERASE1:
1372 parse_erase_write_errors(host, command);
1373 break;
1374 default:
1375 break;
1376 }
1377}
1378
/*
 * Implements chip->cmdfunc. It's only used for a limited set of commands.
 * The rest of the commands wouldn't be called by upper layers. For example,
 * NAND_CMD_READOOB would never be called because we have our own versions
 * of read_oob ops for nand_ecc_ctrl.
 */
static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
			       int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct qcom_nand_host *host = to_qcom_nand_host(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	bool wait = false;
	int ret = 0;

	pre_command(host, command);

	switch (command) {
	case NAND_CMD_RESET:
		ret = reset(host);
		wait = true;
		break;

	case NAND_CMD_READID:
		nandc->buf_count = 4;
		ret = read_id(host, column);
		wait = true;
		break;

	case NAND_CMD_PARAM:
		ret = nandc_param(host);
		wait = true;
		break;

	case NAND_CMD_ERASE1:
		ret = erase_block(host, page_addr);
		wait = true;
		break;

	case NAND_CMD_READ0:
		/* we read the entire page for now */
		WARN_ON(column != 0);

		host->use_ecc = true;
		set_address(host, 0, page_addr);
		update_rw_regs(host, ecc->steps, true);
		break;

	case NAND_CMD_SEQIN:
		/* only latch the address; data comes via write_page later */
		WARN_ON(column != 0);
		set_address(host, 0, page_addr);
		break;

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_STATUS:
	case NAND_CMD_NONE:
	default:
		/* nothing to prepare for these commands */
		break;
	}

	if (ret) {
		dev_err(nandc->dev, "failure executing command %d\n",
			command);
		free_descs(nandc);
		return;
	}

	/* commands that queued descriptors above are executed synchronously */
	if (wait) {
		ret = submit_descs(nandc);
		if (ret)
			dev_err(nandc->dev,
				"failure submitting descs for command %d\n",
				command);
	}

	free_descs(nandc);

	post_command(host, command);
}
1459
1460/*
1461 * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
1462 * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
1463 *
1464 * when using RS ECC, the HW reports the same erros when reading an erased CW,
1465 * but it notifies that it is an erased CW by placing special characters at
1466 * certain offsets in the buffer.
1467 *
1468 * verify if the page is erased or not, and fix up the page for RS ECC by
1469 * replacing the special characters with 0xff.
1470 */
1471static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
1472{
1473 u8 empty1, empty2;
1474
1475 /*
1476 * an erased page flags an error in NAND_FLASH_STATUS, check if the page
1477 * is erased by looking for 0x54s at offsets 3 and 175 from the
1478 * beginning of each codeword
1479 */
1480
1481 empty1 = data_buf[3];
1482 empty2 = data_buf[175];
1483
1484 /*
1485 * if the erased codework markers, if they exist override them with
1486 * 0xffs
1487 */
1488 if ((empty1 == 0x54 && empty2 == 0xff) ||
1489 (empty1 == 0xff && empty2 == 0x54)) {
1490 data_buf[3] = 0xff;
1491 data_buf[175] = 0xff;
1492 }
1493
1494 /*
1495 * check if the entire chunk contains 0xffs or not. if it doesn't, then
1496 * restore the original values at the special offsets
1497 */
1498 if (memchr_inv(data_buf, 0xff, data_len)) {
1499 data_buf[3] = empty1;
1500 data_buf[175] = empty2;
1501
1502 return false;
1503 }
1504
1505 return true;
1506}
1507
/* per-codeword status snapshot, laid out as read_reg_dma() fills it */
struct read_stats {
	__le32 flash;		/* NAND_FLASH_STATUS */
	__le32 buffer;		/* NAND_BUFFER_STATUS */
	__le32 erased_cw;	/* NAND_ERASED_CW_DETECT_STATUS */
};
1513
/*
 * reads back status registers set by the controller to notify page read
 * errors. this is equivalent to what 'ecc->correct()' would do.
 *
 * returns the maximum number of bitflips seen in any codeword, and updates
 * mtd->ecc_stats along the way.
 */
static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
			     u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	unsigned int max_bitflips = 0;
	struct read_stats *buf;
	int i;

	/* reg_read_buf holds one read_stats triplet per codeword */
	buf = (struct read_stats *)nandc->reg_read_buf;
	nandc_read_buffer_sync(nandc, true);

	for (i = 0; i < ecc->steps; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		/* the last codeword carries the free OOB bytes */
		if (i == (ecc->steps - 1)) {
			data_len = ecc->size - ((ecc->steps - 1) << 2);
			oob_len = ecc->steps << 2;
		} else {
			data_len = host->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->flash);
		buffer = le32_to_cpu(buf->buffer);
		erased_cw = le32_to_cpu(buf->erased_cw);

		if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			bool erased;

			/* ignore erased codeword errors */
			if (host->bch_enabled) {
				/* BCH: HW reports erased CWs directly */
				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
					 true : false;
			} else {
				/* RS: detect via in-buffer markers */
				erased = erased_chunk_check_and_fixup(data_buf,
								      data_len);
			}

			if (erased) {
				data_buf += data_len;
				if (oob_buf)
					oob_buf += oob_len + ecc->bytes;
				continue;
			}

			if (buffer & BS_UNCORRECTABLE_BIT) {
				int ret, ecclen, extraooblen;
				void *eccbuf;

				eccbuf = oob_buf ? oob_buf + oob_len : NULL;
				ecclen = oob_buf ? host->ecc_bytes_hw : 0;
				extraooblen = oob_buf ? oob_len : 0;

				/*
				 * make sure it isn't an erased page reported
				 * as not-erased by HW because of a few bitflips
				 */
				ret = nand_check_erased_ecc_chunk(data_buf,
					data_len, eccbuf, ecclen, oob_buf,
					extraooblen, ecc->strength);
				if (ret < 0) {
					mtd->ecc_stats.failed++;
				} else {
					mtd->ecc_stats.corrected += ret;
					max_bitflips =
						max_t(unsigned int, max_bitflips, ret);
				}
			}
		} else {
			/* no flash error: HW corrected; count the bitflips */
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc->bytes;
	}

	return max_bitflips;
}
1605
/*
 * helper to perform the actual page read operation, used by ecc->read_page(),
 * ecc->read_oob()
 *
 * either buffer may be NULL: data-only, oob-only and combined reads are all
 * supported.
 */
static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
			 u8 *oob_buf)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int i, ret;

	config_nand_page_read(nandc);

	/* queue cmd descs for each codeword */
	for (i = 0; i < ecc->steps; i++) {
		int data_size, oob_size;

		/* the last codeword also holds the free OOB bytes */
		if (i == (ecc->steps - 1)) {
			data_size = ecc->size - ((ecc->steps - 1) << 2);
			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
				   host->spare_bytes;
		} else {
			data_size = host->cw_data;
			oob_size = host->ecc_bytes_hw + host->spare_bytes;
		}

		/* BAM needs explicit read locations for each requested part */
		if (nandc->props->is_bam) {
			if (data_buf && oob_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 0);
				nandc_set_read_loc(nandc, 1, data_size,
						   oob_size, 1);
			} else if (data_buf) {
				nandc_set_read_loc(nandc, 0, 0, data_size, 1);
			} else {
				nandc_set_read_loc(nandc, 0, data_size,
						   oob_size, 1);
			}
		}

		config_nand_cw_read(nandc);

		if (data_buf)
			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
				      data_size, 0);

		/*
		 * when ecc is enabled, the controller doesn't read the real
		 * or dummy bad block markers in each chunk. To maintain a
		 * consistent layout across RAW and ECC reads, we just
		 * leave the real/dummy BBM offsets empty (i.e, filled with
		 * 0xffs)
		 */
		if (oob_buf) {
			int j;

			for (j = 0; j < host->bbm_size; j++)
				*oob_buf++ = 0xff;

			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
				      oob_buf, oob_size, 0);
		}

		if (data_buf)
			data_buf += data_size;
		if (oob_buf)
			oob_buf += oob_size;
	}

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failure to read page/oob\n");

	free_descs(nandc);

	return ret;
}
1683
/*
 * a helper that copies the last step/codeword of a page (containing free oob)
 * into our local buffer
 */
static int copy_last_cw(struct qcom_nand_host *host, int page)
{
	struct nand_chip *chip = &host->chip;
	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int size;
	int ret;

	clear_read_regs(nandc);

	/* codeword payload size differs between ECC and raw mode */
	size = host->use_ecc ? host->cw_data : host->cw_size;

	/* prepare a clean read buffer */
	memset(nandc->data_buffer, 0xff, size);

	/* address the last codeword of the page */
	set_address(host, host->cw_size * (ecc->steps - 1), page);
	update_rw_regs(host, 1, true);

	config_nand_single_cw_page_read(nandc);

	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

	ret = submit_descs(nandc);
	if (ret)
		dev_err(nandc->dev, "failed to copy last codeword\n");

	free_descs(nandc);

	return ret;
}
1718
1719/* implements ecc->read_page() */
1720static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1721 uint8_t *buf, int oob_required, int page)
1722{
1723 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1724 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1725 u8 *data_buf, *oob_buf = NULL;
1726 int ret;
1727
Boris Brezillon25f815f2017-11-30 18:01:30 +01001728 nand_read_page_op(chip, page, 0, NULL, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301729 data_buf = buf;
1730 oob_buf = oob_required ? chip->oob_poi : NULL;
1731
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301732 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301733 ret = read_page_ecc(host, data_buf, oob_buf);
1734 if (ret) {
1735 dev_err(nandc->dev, "failure to read page\n");
1736 return ret;
1737 }
1738
1739 return parse_read_errors(host, data_buf, oob_buf);
1740}
1741
1742/* implements ecc->read_page_raw() */
1743static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
1744 struct nand_chip *chip, uint8_t *buf,
1745 int oob_required, int page)
1746{
1747 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1748 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1749 u8 *data_buf, *oob_buf;
1750 struct nand_ecc_ctrl *ecc = &chip->ecc;
1751 int i, ret;
Abhishek Sahu91af95c2017-08-17 17:37:43 +05301752 int read_loc;
Archit Tanejac76b78d2016-02-03 14:29:50 +05301753
Boris Brezillon25f815f2017-11-30 18:01:30 +01001754 nand_read_page_op(chip, page, 0, NULL, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301755 data_buf = buf;
1756 oob_buf = chip->oob_poi;
1757
1758 host->use_ecc = false;
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301759
1760 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301761 update_rw_regs(host, ecc->steps, true);
Abhishek Sahubde43302017-07-19 17:17:55 +05301762 config_nand_page_read(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301763
1764 for (i = 0; i < ecc->steps; i++) {
1765 int data_size1, data_size2, oob_size1, oob_size2;
1766 int reg_off = FLASH_BUF_ACC;
1767
1768 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1769 oob_size1 = host->bbm_size;
1770
1771 if (i == (ecc->steps - 1)) {
1772 data_size2 = ecc->size - data_size1 -
1773 ((ecc->steps - 1) << 2);
1774 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1775 host->spare_bytes;
1776 } else {
1777 data_size2 = host->cw_data - data_size1;
1778 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1779 }
1780
Abhishek Sahu91af95c2017-08-17 17:37:43 +05301781 if (nandc->props->is_bam) {
1782 read_loc = 0;
1783 nandc_set_read_loc(nandc, 0, read_loc, data_size1, 0);
1784 read_loc += data_size1;
1785
1786 nandc_set_read_loc(nandc, 1, read_loc, oob_size1, 0);
1787 read_loc += oob_size1;
1788
1789 nandc_set_read_loc(nandc, 2, read_loc, data_size2, 0);
1790 read_loc += data_size2;
1791
1792 nandc_set_read_loc(nandc, 3, read_loc, oob_size2, 1);
1793 }
1794
Abhishek Sahubde43302017-07-19 17:17:55 +05301795 config_nand_cw_read(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301796
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301797 read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301798 reg_off += data_size1;
1799 data_buf += data_size1;
1800
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301801 read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301802 reg_off += oob_size1;
1803 oob_buf += oob_size1;
1804
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301805 read_data_dma(nandc, reg_off, data_buf, data_size2, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301806 reg_off += data_size2;
1807 data_buf += data_size2;
1808
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301809 read_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301810 oob_buf += oob_size2;
1811 }
1812
1813 ret = submit_descs(nandc);
1814 if (ret)
1815 dev_err(nandc->dev, "failure to read raw page\n");
1816
1817 free_descs(nandc);
1818
1819 return 0;
1820}
1821
1822/* implements ecc->read_oob() */
1823static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1824 int page)
1825{
1826 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1827 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1828 struct nand_ecc_ctrl *ecc = &chip->ecc;
1829 int ret;
1830
1831 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301832 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301833
1834 host->use_ecc = true;
1835 set_address(host, 0, page);
1836 update_rw_regs(host, ecc->steps, true);
1837
1838 ret = read_page_ecc(host, NULL, chip->oob_poi);
1839 if (ret)
1840 dev_err(nandc->dev, "failure to read oob\n");
1841
1842 return ret;
1843}
1844
1845/* implements ecc->write_page() */
1846static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1847 const uint8_t *buf, int oob_required, int page)
1848{
1849 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1850 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1851 struct nand_ecc_ctrl *ecc = &chip->ecc;
1852 u8 *data_buf, *oob_buf;
1853 int i, ret;
1854
Boris Brezillon25f815f2017-11-30 18:01:30 +01001855 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1856
Archit Tanejac76b78d2016-02-03 14:29:50 +05301857 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301858 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301859
1860 data_buf = (u8 *)buf;
1861 oob_buf = chip->oob_poi;
1862
1863 host->use_ecc = true;
1864 update_rw_regs(host, ecc->steps, false);
Abhishek Sahu77cc5362017-07-19 17:17:56 +05301865 config_nand_page_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301866
1867 for (i = 0; i < ecc->steps; i++) {
1868 int data_size, oob_size;
1869
1870 if (i == (ecc->steps - 1)) {
1871 data_size = ecc->size - ((ecc->steps - 1) << 2);
1872 oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
1873 host->spare_bytes;
1874 } else {
1875 data_size = host->cw_data;
1876 oob_size = ecc->bytes;
1877 }
1878
Archit Tanejac76b78d2016-02-03 14:29:50 +05301879
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301880 write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
1881 i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301882
1883 /*
1884 * when ECC is enabled, we don't really need to write anything
1885 * to oob for the first n - 1 codewords since these oob regions
1886 * just contain ECC bytes that's written by the controller
1887 * itself. For the last codeword, we skip the bbm positions and
1888 * write to the free oob area.
1889 */
1890 if (i == (ecc->steps - 1)) {
1891 oob_buf += host->bbm_size;
1892
1893 write_data_dma(nandc, FLASH_BUF_ACC + data_size,
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301894 oob_buf, oob_size, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301895 }
1896
Abhishek Sahu77cc5362017-07-19 17:17:56 +05301897 config_nand_cw_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301898
1899 data_buf += data_size;
1900 oob_buf += oob_size;
1901 }
1902
1903 ret = submit_descs(nandc);
1904 if (ret)
1905 dev_err(nandc->dev, "failure to write page\n");
1906
1907 free_descs(nandc);
1908
Boris Brezillon25f815f2017-11-30 18:01:30 +01001909 if (!ret)
1910 ret = nand_prog_page_end_op(chip);
1911
Archit Tanejac76b78d2016-02-03 14:29:50 +05301912 return ret;
1913}
1914
1915/* implements ecc->write_page_raw() */
1916static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
1917 struct nand_chip *chip, const uint8_t *buf,
1918 int oob_required, int page)
1919{
1920 struct qcom_nand_host *host = to_qcom_nand_host(chip);
1921 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
1922 struct nand_ecc_ctrl *ecc = &chip->ecc;
1923 u8 *data_buf, *oob_buf;
1924 int i, ret;
1925
Boris Brezillon25f815f2017-11-30 18:01:30 +01001926 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301927 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05301928 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301929
1930 data_buf = (u8 *)buf;
1931 oob_buf = chip->oob_poi;
1932
1933 host->use_ecc = false;
1934 update_rw_regs(host, ecc->steps, false);
Abhishek Sahu77cc5362017-07-19 17:17:56 +05301935 config_nand_page_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301936
1937 for (i = 0; i < ecc->steps; i++) {
1938 int data_size1, data_size2, oob_size1, oob_size2;
1939 int reg_off = FLASH_BUF_ACC;
1940
1941 data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
1942 oob_size1 = host->bbm_size;
1943
1944 if (i == (ecc->steps - 1)) {
1945 data_size2 = ecc->size - data_size1 -
1946 ((ecc->steps - 1) << 2);
1947 oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
1948 host->spare_bytes;
1949 } else {
1950 data_size2 = host->cw_data - data_size1;
1951 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
1952 }
1953
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301954 write_data_dma(nandc, reg_off, data_buf, data_size1,
1955 NAND_BAM_NO_EOT);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301956 reg_off += data_size1;
1957 data_buf += data_size1;
1958
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301959 write_data_dma(nandc, reg_off, oob_buf, oob_size1,
1960 NAND_BAM_NO_EOT);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301961 reg_off += oob_size1;
1962 oob_buf += oob_size1;
1963
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301964 write_data_dma(nandc, reg_off, data_buf, data_size2,
1965 NAND_BAM_NO_EOT);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301966 reg_off += data_size2;
1967 data_buf += data_size2;
1968
Abhishek Sahu67e830a2017-08-17 17:37:42 +05301969 write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301970 oob_buf += oob_size2;
1971
Abhishek Sahu77cc5362017-07-19 17:17:56 +05301972 config_nand_cw_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05301973 }
1974
1975 ret = submit_descs(nandc);
1976 if (ret)
1977 dev_err(nandc->dev, "failure to write raw page\n");
1978
1979 free_descs(nandc);
1980
Boris Brezillon25f815f2017-11-30 18:01:30 +01001981 if (!ret)
1982 ret = nand_prog_page_end_op(chip);
1983
Archit Tanejac76b78d2016-02-03 14:29:50 +05301984 return ret;
1985}
1986
1987/*
1988 * implements ecc->write_oob()
1989 *
1990 * the NAND controller cannot write only data or only oob within a codeword,
1991 * since ecc is calculated for the combined codeword. we first copy the
1992 * entire contents for the last codeword(data + oob), replace the old oob
1993 * with the new one in chip->oob_poi, and then write the entire codeword.
1994 * this read-copy-write operation results in a slight performance loss.
1995 */
1996static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1997 int page)
1998{
1999 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2000 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2001 struct nand_ecc_ctrl *ecc = &chip->ecc;
2002 u8 *oob = chip->oob_poi;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302003 int data_size, oob_size;
Boris Brezillon97d90da2017-11-30 18:01:29 +01002004 int ret;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302005
2006 host->use_ecc = true;
2007
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05302008 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302009 ret = copy_last_cw(host, page);
2010 if (ret)
2011 return ret;
2012
2013 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05302014 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302015
2016 /* calculate the data and oob size for the last codeword/step */
2017 data_size = ecc->size - ((ecc->steps - 1) << 2);
Boris Brezillonaa02fcf2016-03-18 17:53:31 +01002018 oob_size = mtd->oobavail;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302019
2020 /* override new oob content to last codeword */
Boris Brezillonaa02fcf2016-03-18 17:53:31 +01002021 mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
2022 0, mtd->oobavail);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302023
2024 set_address(host, host->cw_size * (ecc->steps - 1), page);
2025 update_rw_regs(host, 1, false);
2026
Abhishek Sahu77cc5362017-07-19 17:17:56 +05302027 config_nand_page_write(nandc);
Abhishek Sahu67e830a2017-08-17 17:37:42 +05302028 write_data_dma(nandc, FLASH_BUF_ACC,
2029 nandc->data_buffer, data_size + oob_size, 0);
Abhishek Sahu77cc5362017-07-19 17:17:56 +05302030 config_nand_cw_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302031
2032 ret = submit_descs(nandc);
2033
2034 free_descs(nandc);
2035
2036 if (ret) {
2037 dev_err(nandc->dev, "failure to write oob\n");
2038 return -EIO;
2039 }
2040
Boris Brezillon97d90da2017-11-30 18:01:29 +01002041 return nand_prog_page_end_op(chip);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302042}
2043
2044static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
2045{
2046 struct nand_chip *chip = mtd_to_nand(mtd);
2047 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2048 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2049 struct nand_ecc_ctrl *ecc = &chip->ecc;
2050 int page, ret, bbpos, bad = 0;
2051 u32 flash_status;
2052
2053 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2054
2055 /*
2056 * configure registers for a raw sub page read, the address is set to
2057 * the beginning of the last codeword, we don't care about reading ecc
2058 * portion of oob. we just want the first few bytes from this codeword
2059 * that contains the BBM
2060 */
2061 host->use_ecc = false;
2062
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05302063 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302064 ret = copy_last_cw(host, page);
2065 if (ret)
2066 goto err;
2067
2068 flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
2069
2070 if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
2071 dev_warn(nandc->dev, "error when trying to read BBM\n");
2072 goto err;
2073 }
2074
2075 bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
2076
2077 bad = nandc->data_buffer[bbpos] != 0xff;
2078
2079 if (chip->options & NAND_BUSWIDTH_16)
2080 bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
2081err:
2082 return bad;
2083}
2084
2085static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
2086{
2087 struct nand_chip *chip = mtd_to_nand(mtd);
2088 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2089 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2090 struct nand_ecc_ctrl *ecc = &chip->ecc;
Boris Brezillon97d90da2017-11-30 18:01:29 +01002091 int page, ret;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302092
2093 clear_read_regs(nandc);
Abhishek Sahu4e2f6c52017-08-17 17:37:46 +05302094 clear_bam_transaction(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302095
2096 /*
2097 * to mark the BBM as bad, we flash the entire last codeword with 0s.
2098 * we don't care about the rest of the content in the codeword since
2099 * we aren't going to use this block again
2100 */
2101 memset(nandc->data_buffer, 0x00, host->cw_size);
2102
2103 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
2104
2105 /* prepare write */
2106 host->use_ecc = false;
2107 set_address(host, host->cw_size * (ecc->steps - 1), page);
2108 update_rw_regs(host, 1, false);
2109
Abhishek Sahu77cc5362017-07-19 17:17:56 +05302110 config_nand_page_write(nandc);
Abhishek Sahu67e830a2017-08-17 17:37:42 +05302111 write_data_dma(nandc, FLASH_BUF_ACC,
2112 nandc->data_buffer, host->cw_size, 0);
Abhishek Sahu77cc5362017-07-19 17:17:56 +05302113 config_nand_cw_write(nandc);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302114
2115 ret = submit_descs(nandc);
2116
2117 free_descs(nandc);
2118
2119 if (ret) {
2120 dev_err(nandc->dev, "failure to update BBM\n");
2121 return -EIO;
2122 }
2123
Boris Brezillon97d90da2017-11-30 18:01:29 +01002124 return nand_prog_page_end_op(chip);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302125}
2126
2127/*
2128 * the three functions below implement chip->read_byte(), chip->read_buf()
2129 * and chip->write_buf() respectively. these aren't used for
2130 * reading/writing page data, they are used for smaller data like reading
2131 * id, status etc
2132 */
2133static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
2134{
2135 struct nand_chip *chip = mtd_to_nand(mtd);
2136 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2137 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2138 u8 *buf = nandc->data_buffer;
2139 u8 ret = 0x0;
2140
2141 if (host->last_command == NAND_CMD_STATUS) {
2142 ret = host->status;
2143
2144 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2145
2146 return ret;
2147 }
2148
2149 if (nandc->buf_start < nandc->buf_count)
2150 ret = buf[nandc->buf_start++];
2151
2152 return ret;
2153}
2154
2155static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
2156{
2157 struct nand_chip *chip = mtd_to_nand(mtd);
2158 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2159 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2160
2161 memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
2162 nandc->buf_start += real_len;
2163}
2164
2165static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
2166 int len)
2167{
2168 struct nand_chip *chip = mtd_to_nand(mtd);
2169 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2170 int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
2171
2172 memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
2173
2174 nandc->buf_start += real_len;
2175}
2176
2177/* we support only one external chip for now */
2178static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
2179{
2180 struct nand_chip *chip = mtd_to_nand(mtd);
2181 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2182
2183 if (chipnr <= 0)
2184 return;
2185
2186 dev_warn(nandc->dev, "invalid chip select\n");
2187}
2188
2189/*
2190 * NAND controller page layout info
2191 *
2192 * Layout with ECC enabled:
2193 *
2194 * |----------------------| |---------------------------------|
2195 * | xx.......yy| | *********xx.......yy|
2196 * | DATA xx..ECC..yy| | DATA **SPARE**xx..ECC..yy|
2197 * | (516) xx.......yy| | (516-n*4) **(n*4)**xx.......yy|
2198 * | xx.......yy| | *********xx.......yy|
2199 * |----------------------| |---------------------------------|
2200 * codeword 1,2..n-1 codeword n
2201 * <---(528/532 Bytes)--> <-------(528/532 Bytes)--------->
2202 *
2203 * n = Number of codewords in the page
2204 * . = ECC bytes
2205 * * = Spare/free bytes
2206 * x = Unused byte(s)
2207 * y = Reserved byte(s)
2208 *
2209 * 2K page: n = 4, spare = 16 bytes
2210 * 4K page: n = 8, spare = 32 bytes
2211 * 8K page: n = 16, spare = 64 bytes
2212 *
2213 * the qcom nand controller operates at a sub page/codeword level. each
2214 * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
2215 * the number of ECC bytes vary based on the ECC strength and the bus width.
2216 *
2217 * the first n - 1 codewords contains 516 bytes of user data, the remaining
2218 * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
2219 * both user data and spare(oobavail) bytes that sum up to 516 bytes.
2220 *
2221 * When we access a page with ECC enabled, the reserved bytes(s) are not
2222 * accessible at all. When reading, we fill up these unreadable positions
2223 * with 0xffs. When writing, the controller skips writing the inaccessible
2224 * bytes.
2225 *
2226 * Layout with ECC disabled:
2227 *
2228 * |------------------------------| |---------------------------------------|
2229 * | yy xx.......| | bb *********xx.......|
2230 * | DATA1 yy DATA2 xx..ECC..| | DATA1 bb DATA2 **SPARE**xx..ECC..|
2231 * | (size1) yy (size2) xx.......| | (size1) bb (size2) **(n*4)**xx.......|
2232 * | yy xx.......| | bb *********xx.......|
2233 * |------------------------------| |---------------------------------------|
2234 * codeword 1,2..n-1 codeword n
2235 * <-------(528/532 Bytes)------> <-----------(528/532 Bytes)----------->
2236 *
2237 * n = Number of codewords in the page
2238 * . = ECC bytes
2239 * * = Spare/free bytes
2240 * x = Unused byte(s)
 * y = Dummy Bad Block byte(s)
2242 * b = Real Bad Block byte(s)
2243 * size1/size2 = function of codeword size and 'n'
2244 *
2245 * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
2246 * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
2247 * Block Markers. In the last codeword, this position contains the real BBM
2248 *
2249 * In order to have a consistent layout between RAW and ECC modes, we assume
2250 * the following OOB layout arrangement:
2251 *
2252 * |-----------| |--------------------|
2253 * |yyxx.......| |bb*********xx.......|
2254 * |yyxx..ECC..| |bb*FREEOOB*xx..ECC..|
2255 * |yyxx.......| |bb*********xx.......|
2256 * |yyxx.......| |bb*********xx.......|
2257 * |-----------| |--------------------|
2258 * first n - 1 nth OOB region
2259 * OOB regions
2260 *
2261 * n = Number of codewords in the page
2262 * . = ECC bytes
2263 * * = FREE OOB bytes
2264 * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
2265 * x = Unused byte(s)
2266 * b = Real bad block byte(s) (inaccessible when ECC enabled)
2267 *
2268 * This layout is read as is when ECC is disabled. When ECC is enabled, the
2269 * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
2270 * and assumed as 0xffs when we read a page/oob. The ECC, unused and
Boris Brezillon421e81c2016-03-18 17:54:27 +01002271 * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
2272 * the sum of the three).
Archit Tanejac76b78d2016-02-03 14:29:50 +05302273 */
Boris Brezillon421e81c2016-03-18 17:54:27 +01002274static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2275 struct mtd_oob_region *oobregion)
Archit Tanejac76b78d2016-02-03 14:29:50 +05302276{
Boris Brezillon421e81c2016-03-18 17:54:27 +01002277 struct nand_chip *chip = mtd_to_nand(mtd);
2278 struct qcom_nand_host *host = to_qcom_nand_host(chip);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302279 struct nand_ecc_ctrl *ecc = &chip->ecc;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302280
Boris Brezillon421e81c2016-03-18 17:54:27 +01002281 if (section > 1)
2282 return -ERANGE;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302283
Boris Brezillon421e81c2016-03-18 17:54:27 +01002284 if (!section) {
2285 oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
2286 host->bbm_size;
2287 oobregion->offset = 0;
2288 } else {
2289 oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
2290 oobregion->offset = mtd->oobsize - oobregion->length;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302291 }
2292
Boris Brezillon421e81c2016-03-18 17:54:27 +01002293 return 0;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302294}
2295
Boris Brezillon421e81c2016-03-18 17:54:27 +01002296static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
2297 struct mtd_oob_region *oobregion)
2298{
2299 struct nand_chip *chip = mtd_to_nand(mtd);
2300 struct qcom_nand_host *host = to_qcom_nand_host(chip);
2301 struct nand_ecc_ctrl *ecc = &chip->ecc;
2302
2303 if (section)
2304 return -ERANGE;
2305
2306 oobregion->length = ecc->steps * 4;
2307 oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
2308
2309 return 0;
2310}
2311
2312static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
2313 .ecc = qcom_nand_ooblayout_ecc,
2314 .free = qcom_nand_ooblayout_free,
2315};
2316
Archit Tanejac76b78d2016-02-03 14:29:50 +05302317static int qcom_nand_host_setup(struct qcom_nand_host *host)
2318{
2319 struct nand_chip *chip = &host->chip;
2320 struct mtd_info *mtd = nand_to_mtd(chip);
2321 struct nand_ecc_ctrl *ecc = &chip->ecc;
2322 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
2323 int cwperpage, bad_block_byte;
2324 bool wide_bus;
2325 int ecc_mode = 1;
2326
2327 /*
2328 * the controller requires each step consists of 512 bytes of data.
2329 * bail out if DT has populated a wrong step size.
2330 */
2331 if (ecc->size != NANDC_STEP_SIZE) {
2332 dev_err(nandc->dev, "invalid ecc size\n");
2333 return -EINVAL;
2334 }
2335
2336 wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
2337
2338 if (ecc->strength >= 8) {
2339 /* 8 bit ECC defaults to BCH ECC on all platforms */
2340 host->bch_enabled = true;
2341 ecc_mode = 1;
2342
2343 if (wide_bus) {
2344 host->ecc_bytes_hw = 14;
2345 host->spare_bytes = 0;
2346 host->bbm_size = 2;
2347 } else {
2348 host->ecc_bytes_hw = 13;
2349 host->spare_bytes = 2;
2350 host->bbm_size = 1;
2351 }
2352 } else {
2353 /*
2354 * if the controller supports BCH for 4 bit ECC, the controller
2355 * uses lesser bytes for ECC. If RS is used, the ECC bytes is
2356 * always 10 bytes
2357 */
Abhishek Sahu58f1f222017-08-11 17:09:17 +05302358 if (nandc->props->ecc_modes & ECC_BCH_4BIT) {
Archit Tanejac76b78d2016-02-03 14:29:50 +05302359 /* BCH */
2360 host->bch_enabled = true;
2361 ecc_mode = 0;
2362
2363 if (wide_bus) {
2364 host->ecc_bytes_hw = 8;
2365 host->spare_bytes = 2;
2366 host->bbm_size = 2;
2367 } else {
2368 host->ecc_bytes_hw = 7;
2369 host->spare_bytes = 4;
2370 host->bbm_size = 1;
2371 }
2372 } else {
2373 /* RS */
2374 host->ecc_bytes_hw = 10;
2375
2376 if (wide_bus) {
2377 host->spare_bytes = 0;
2378 host->bbm_size = 2;
2379 } else {
2380 host->spare_bytes = 1;
2381 host->bbm_size = 1;
2382 }
2383 }
2384 }
2385
2386 /*
2387 * we consider ecc->bytes as the sum of all the non-data content in a
2388 * step. It gives us a clean representation of the oob area (even if
2389 * all the bytes aren't used for ECC).It is always 16 bytes for 8 bit
2390 * ECC and 12 bytes for 4 bit ECC
2391 */
2392 ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
2393
2394 ecc->read_page = qcom_nandc_read_page;
2395 ecc->read_page_raw = qcom_nandc_read_page_raw;
2396 ecc->read_oob = qcom_nandc_read_oob;
2397 ecc->write_page = qcom_nandc_write_page;
2398 ecc->write_page_raw = qcom_nandc_write_page_raw;
2399 ecc->write_oob = qcom_nandc_write_oob;
2400
2401 ecc->mode = NAND_ECC_HW;
2402
Boris Brezillon421e81c2016-03-18 17:54:27 +01002403 mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302404
2405 cwperpage = mtd->writesize / ecc->size;
Abhishek Sahucb80f112017-08-17 17:37:40 +05302406 nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
2407 cwperpage);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302408
2409 /*
2410 * DATA_UD_BYTES varies based on whether the read/write command protects
2411 * spare data with ECC too. We protect spare data by default, so we set
2412 * it to main + spare data, which are 512 and 4 bytes respectively.
2413 */
2414 host->cw_data = 516;
2415
2416 /*
2417 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
2418 * for 8 bit ECC
2419 */
2420 host->cw_size = host->cw_data + ecc->bytes;
2421
2422 if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
2423 dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
2424 return -EINVAL;
2425 }
2426
2427 bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
2428
2429 host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
2430 | host->cw_data << UD_SIZE_BYTES
2431 | 0 << DISABLE_STATUS_AFTER_WRITE
2432 | 5 << NUM_ADDR_CYCLES
2433 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
2434 | 0 << STATUS_BFR_READ
2435 | 1 << SET_RD_MODE_AFTER_STATUS
2436 | host->spare_bytes << SPARE_SIZE_BYTES;
2437
2438 host->cfg1 = 7 << NAND_RECOVERY_CYCLES
2439 | 0 << CS_ACTIVE_BSY
2440 | bad_block_byte << BAD_BLOCK_BYTE_NUM
2441 | 0 << BAD_BLOCK_IN_SPARE_AREA
2442 | 2 << WR_RD_BSY_GAP
2443 | wide_bus << WIDE_FLASH
2444 | host->bch_enabled << ENABLE_BCH_ECC;
2445
2446 host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
2447 | host->cw_size << UD_SIZE_BYTES
2448 | 5 << NUM_ADDR_CYCLES
2449 | 0 << SPARE_SIZE_BYTES;
2450
2451 host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
2452 | 0 << CS_ACTIVE_BSY
2453 | 17 << BAD_BLOCK_BYTE_NUM
2454 | 1 << BAD_BLOCK_IN_SPARE_AREA
2455 | 2 << WR_RD_BSY_GAP
2456 | wide_bus << WIDE_FLASH
2457 | 1 << DEV0_CFG1_ECC_DISABLE;
2458
Abhishek Sahu10777de2017-08-03 17:56:39 +02002459 host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
Archit Tanejac76b78d2016-02-03 14:29:50 +05302460 | 0 << ECC_SW_RESET
2461 | host->cw_data << ECC_NUM_DATA_BYTES
2462 | 1 << ECC_FORCE_CLK_OPEN
2463 | ecc_mode << ECC_MODE
2464 | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
2465
2466 host->ecc_buf_cfg = 0x203 << NUM_STEPS;
2467
2468 host->clrflashstatus = FS_READY_BSY_N;
2469 host->clrreadstatus = 0xc0;
Abhishek Sahua86b9c42017-08-17 17:37:44 +05302470 nandc->regs->erased_cw_detect_cfg_clr =
2471 cpu_to_le32(CLR_ERASED_PAGE_DET);
2472 nandc->regs->erased_cw_detect_cfg_set =
2473 cpu_to_le32(SET_ERASED_PAGE_DET);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302474
2475 dev_dbg(nandc->dev,
2476 "cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
2477 host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
2478 host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
2479 cwperpage);
2480
2481 return 0;
2482}
2483
2484static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
2485{
2486 int ret;
2487
2488 ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
2489 if (ret) {
2490 dev_err(nandc->dev, "failed to set DMA mask\n");
2491 return ret;
2492 }
2493
2494 /*
2495 * we use the internal buffer for reading ONFI params, reading small
2496 * data like ID and status, and preforming read-copy-write operations
2497 * when writing to a codeword partially. 532 is the maximum possible
2498 * size of a codeword for our nand controller
2499 */
2500 nandc->buf_size = 532;
2501
2502 nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
2503 GFP_KERNEL);
2504 if (!nandc->data_buffer)
2505 return -ENOMEM;
2506
2507 nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
2508 GFP_KERNEL);
2509 if (!nandc->regs)
2510 return -ENOMEM;
2511
2512 nandc->reg_read_buf = devm_kzalloc(nandc->dev,
2513 MAX_REG_RD * sizeof(*nandc->reg_read_buf),
2514 GFP_KERNEL);
2515 if (!nandc->reg_read_buf)
2516 return -ENOMEM;
2517
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302518 if (nandc->props->is_bam) {
Abhishek Sahu6192ff72017-08-17 17:37:39 +05302519 nandc->reg_read_dma =
2520 dma_map_single(nandc->dev, nandc->reg_read_buf,
2521 MAX_REG_RD *
2522 sizeof(*nandc->reg_read_buf),
2523 DMA_FROM_DEVICE);
2524 if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
2525 dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
2526 return -EIO;
2527 }
2528
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302529 nandc->tx_chan = dma_request_slave_channel(nandc->dev, "tx");
2530 if (!nandc->tx_chan) {
2531 dev_err(nandc->dev, "failed to request tx channel\n");
2532 return -ENODEV;
2533 }
2534
2535 nandc->rx_chan = dma_request_slave_channel(nandc->dev, "rx");
2536 if (!nandc->rx_chan) {
2537 dev_err(nandc->dev, "failed to request rx channel\n");
2538 return -ENODEV;
2539 }
2540
2541 nandc->cmd_chan = dma_request_slave_channel(nandc->dev, "cmd");
2542 if (!nandc->cmd_chan) {
2543 dev_err(nandc->dev, "failed to request cmd channel\n");
2544 return -ENODEV;
2545 }
Abhishek Sahucb80f112017-08-17 17:37:40 +05302546
2547 /*
2548 * Initially allocate BAM transaction to read ONFI param page.
2549 * After detecting all the devices, this BAM transaction will
2550 * be freed and the next BAM tranasction will be allocated with
2551 * maximum codeword size
2552 */
2553 nandc->max_cwperpage = 1;
2554 nandc->bam_txn = alloc_bam_transaction(nandc);
2555 if (!nandc->bam_txn) {
2556 dev_err(nandc->dev,
2557 "failed to allocate bam transaction\n");
2558 return -ENOMEM;
2559 }
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302560 } else {
2561 nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
2562 if (!nandc->chan) {
2563 dev_err(nandc->dev,
2564 "failed to request slave channel\n");
2565 return -ENODEV;
2566 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302567 }
2568
2569 INIT_LIST_HEAD(&nandc->desc_list);
2570 INIT_LIST_HEAD(&nandc->host_list);
2571
Marc Gonzalezd45bc582016-07-27 11:23:52 +02002572 nand_hw_control_init(&nandc->controller);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302573
2574 return 0;
2575}
2576
2577static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
2578{
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302579 if (nandc->props->is_bam) {
Abhishek Sahu6192ff72017-08-17 17:37:39 +05302580 if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
2581 dma_unmap_single(nandc->dev, nandc->reg_read_dma,
2582 MAX_REG_RD *
2583 sizeof(*nandc->reg_read_buf),
2584 DMA_FROM_DEVICE);
2585
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302586 if (nandc->tx_chan)
2587 dma_release_channel(nandc->tx_chan);
2588
2589 if (nandc->rx_chan)
2590 dma_release_channel(nandc->rx_chan);
2591
2592 if (nandc->cmd_chan)
2593 dma_release_channel(nandc->cmd_chan);
2594 } else {
2595 if (nandc->chan)
2596 dma_release_channel(nandc->chan);
2597 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302598}
2599
2600/* one time setup of a few nand controller registers */
2601static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
2602{
Abhishek Sahu9d43f912017-08-17 17:37:45 +05302603 u32 nand_ctrl;
2604
Archit Tanejac76b78d2016-02-03 14:29:50 +05302605 /* kill onenand */
2606 nandc_write(nandc, SFLASHC_BURST_CFG, 0);
Abhishek Sahucc409b92017-08-17 17:37:47 +05302607 nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
2608 NAND_DEV_CMD_VLD_VAL);
Archit Tanejac76b78d2016-02-03 14:29:50 +05302609
Abhishek Sahu9d43f912017-08-17 17:37:45 +05302610 /* enable ADM or BAM DMA */
2611 if (nandc->props->is_bam) {
2612 nand_ctrl = nandc_read(nandc, NAND_CTRL);
2613 nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN);
2614 } else {
2615 nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
2616 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302617
2618 /* save the original values of these registers */
Abhishek Sahucc409b92017-08-17 17:37:47 +05302619 nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
Abhishek Sahud8a9b322017-08-11 17:09:16 +05302620 nandc->vld = NAND_DEV_CMD_VLD_VAL;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302621
2622 return 0;
2623}
2624
2625static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
2626 struct qcom_nand_host *host,
2627 struct device_node *dn)
2628{
2629 struct nand_chip *chip = &host->chip;
2630 struct mtd_info *mtd = nand_to_mtd(chip);
2631 struct device *dev = nandc->dev;
2632 int ret;
2633
2634 ret = of_property_read_u32(dn, "reg", &host->cs);
2635 if (ret) {
2636 dev_err(dev, "can't get chip-select\n");
2637 return -ENXIO;
2638 }
2639
2640 nand_set_flash_node(chip, dn);
2641 mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
Fabio Estevam069f0532018-01-05 18:02:55 -02002642 if (!mtd->name)
2643 return -ENOMEM;
2644
Archit Tanejac76b78d2016-02-03 14:29:50 +05302645 mtd->owner = THIS_MODULE;
2646 mtd->dev.parent = dev;
2647
2648 chip->cmdfunc = qcom_nandc_command;
2649 chip->select_chip = qcom_nandc_select_chip;
2650 chip->read_byte = qcom_nandc_read_byte;
2651 chip->read_buf = qcom_nandc_read_buf;
2652 chip->write_buf = qcom_nandc_write_buf;
Boris Brezillon4a78cc62017-05-26 17:10:15 +02002653 chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
2654 chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302655
2656 /*
2657 * the bad block marker is readable only when we read the last codeword
2658 * of a page with ECC disabled. currently, the nand_base and nand_bbt
2659 * helpers don't allow us to read BB from a nand chip with ECC
2660 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
2661 * and block_markbad helpers until we permanently switch to using
2662 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
2663 */
2664 chip->block_bad = qcom_nandc_block_bad;
2665 chip->block_markbad = qcom_nandc_block_markbad;
2666
2667 chip->controller = &nandc->controller;
2668 chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
2669 NAND_SKIP_BBTSCAN;
2670
2671 /* set up initial status value */
2672 host->status = NAND_STATUS_READY | NAND_STATUS_WP;
2673
2674 ret = nand_scan_ident(mtd, 1, NULL);
2675 if (ret)
2676 return ret;
2677
2678 ret = qcom_nand_host_setup(host);
Abhishek Sahu89f51272017-07-19 17:17:58 +05302679
2680 return ret;
2681}
2682
2683static int qcom_nand_mtd_register(struct qcom_nand_controller *nandc,
2684 struct qcom_nand_host *host,
2685 struct device_node *dn)
2686{
2687 struct nand_chip *chip = &host->chip;
2688 struct mtd_info *mtd = nand_to_mtd(chip);
2689 int ret;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302690
2691 ret = nand_scan_tail(mtd);
2692 if (ret)
2693 return ret;
2694
Abhishek Sahu89f51272017-07-19 17:17:58 +05302695 ret = mtd_device_register(mtd, NULL, 0);
2696 if (ret)
2697 nand_cleanup(mtd_to_nand(mtd));
2698
2699 return ret;
2700}
2701
2702static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2703{
2704 struct device *dev = nandc->dev;
2705 struct device_node *dn = dev->of_node, *child;
2706 struct qcom_nand_host *host, *tmp;
2707 int ret;
2708
2709 for_each_available_child_of_node(dn, child) {
2710 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2711 if (!host) {
2712 of_node_put(child);
2713 return -ENOMEM;
2714 }
2715
2716 ret = qcom_nand_host_init(nandc, host, child);
2717 if (ret) {
2718 devm_kfree(dev, host);
2719 continue;
2720 }
2721
2722 list_add_tail(&host->node, &nandc->host_list);
2723 }
2724
2725 if (list_empty(&nandc->host_list))
2726 return -ENODEV;
2727
Abhishek Sahucb80f112017-08-17 17:37:40 +05302728 if (nandc->props->is_bam) {
2729 free_bam_transaction(nandc);
2730 nandc->bam_txn = alloc_bam_transaction(nandc);
2731 if (!nandc->bam_txn) {
2732 dev_err(nandc->dev,
2733 "failed to allocate bam transaction\n");
2734 return -ENOMEM;
2735 }
2736 }
2737
Abhishek Sahu89f51272017-07-19 17:17:58 +05302738 list_for_each_entry_safe(host, tmp, &nandc->host_list, node) {
2739 ret = qcom_nand_mtd_register(nandc, host, child);
2740 if (ret) {
2741 list_del(&host->node);
2742 devm_kfree(dev, host);
2743 }
2744 }
2745
2746 if (list_empty(&nandc->host_list))
2747 return -ENODEV;
2748
2749 return 0;
Archit Tanejac76b78d2016-02-03 14:29:50 +05302750}
2751
2752/* parse custom DT properties here */
2753static int qcom_nandc_parse_dt(struct platform_device *pdev)
2754{
2755 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2756 struct device_node *np = nandc->dev->of_node;
2757 int ret;
2758
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302759 if (!nandc->props->is_bam) {
2760 ret = of_property_read_u32(np, "qcom,cmd-crci",
2761 &nandc->cmd_crci);
2762 if (ret) {
2763 dev_err(nandc->dev, "command CRCI unspecified\n");
2764 return ret;
2765 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302766
Abhishek Sahu497d7d82017-08-11 17:09:19 +05302767 ret = of_property_read_u32(np, "qcom,data-crci",
2768 &nandc->data_crci);
2769 if (ret) {
2770 dev_err(nandc->dev, "data CRCI unspecified\n");
2771 return ret;
2772 }
Archit Tanejac76b78d2016-02-03 14:29:50 +05302773 }
2774
2775 return 0;
2776}
2777
/*
 * probe: map registers, look up per-variant properties, acquire clocks,
 * allocate DMA resources, program the controller and scan for chips.
 * All failures before the goto ladder rely on devm_* cleanup only.
 */
static int qcom_nandc_probe(struct platform_device *pdev)
{
	struct qcom_nand_controller *nandc;
	const void *dev_data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
	if (!nandc)
		return -ENOMEM;

	platform_set_drvdata(pdev, nandc);
	nandc->dev = dev;

	/* per-SoC variant data attached to the of_match entry */
	dev_data = of_device_get_match_data(dev);
	if (!dev_data) {
		dev_err(&pdev->dev, "failed to get device data\n");
		return -ENODEV;
	}

	nandc->props = dev_data;

	/*
	 * devm_ioremap_resource() validates 'res' (including NULL) and
	 * returns an ERR_PTR on any problem, so 'res' is safe to
	 * dereference once nandc->base is valid
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nandc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(nandc->base))
		return PTR_ERR(nandc->base);

	nandc->base_phys = res->start;
	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);

	nandc->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(nandc->core_clk))
		return PTR_ERR(nandc->core_clk);

	nandc->aon_clk = devm_clk_get(dev, "aon");
	if (IS_ERR(nandc->aon_clk))
		return PTR_ERR(nandc->aon_clk);

	ret = qcom_nandc_parse_dt(pdev);
	if (ret)
		return ret;

	/*
	 * NOTE(review): on alloc failure this jumps to err_core_clk and
	 * calls qcom_nandc_unalloc() on a partially-set-up controller;
	 * this appears intentional since unalloc NULL-checks each DMA
	 * channel before releasing it — confirm against qcom_nandc_unalloc
	 */
	ret = qcom_nandc_alloc(nandc);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->core_clk);
	if (ret)
		goto err_core_clk;

	ret = clk_prepare_enable(nandc->aon_clk);
	if (ret)
		goto err_aon_clk;

	ret = qcom_nandc_setup(nandc);
	if (ret)
		goto err_setup;

	ret = qcom_probe_nand_devices(nandc);
	if (ret)
		goto err_setup;

	return 0;

	/* unwind in reverse acquisition order; labels fall through */
err_setup:
	clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
	clk_disable_unprepare(nandc->core_clk);
err_core_clk:
	qcom_nandc_unalloc(nandc);

	return ret;
}
2852
2853static int qcom_nandc_remove(struct platform_device *pdev)
2854{
2855 struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
2856 struct qcom_nand_host *host;
2857
2858 list_for_each_entry(host, &nandc->host_list, node)
2859 nand_release(nand_to_mtd(&host->chip));
2860
2861 qcom_nandc_unalloc(nandc);
2862
2863 clk_disable_unprepare(nandc->aon_clk);
2864 clk_disable_unprepare(nandc->core_clk);
2865
2866 return 0;
2867}
2868
/* IPQ806x: ADM DMA (CRCI-based), 4-bit Reed-Solomon or 8-bit BCH ECC */
static const struct qcom_nandc_props ipq806x_nandc_props = {
	.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
	.is_bam = false,
	.dev_cmd_reg_start = 0x0,
};

/* IPQ4019: BAM DMA, 4-bit or 8-bit BCH ECC */
static const struct qcom_nandc_props ipq4019_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.dev_cmd_reg_start = 0x0,
};

/* IPQ8074: like IPQ4019 but the NAND_DEV_CMD* registers sit at 0x7000 */
static const struct qcom_nandc_props ipq8074_nandc_props = {
	.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
	.is_bam = true,
	.dev_cmd_reg_start = 0x7000,
};
2886
/*
 * each entry's .data points at the qcom_nandc_props describing that
 * controller variant (DMA engine type, supported ECC modes, command
 * register layout); retrieved in probe via of_device_get_match_data()
 */
static const struct of_device_id qcom_nandc_of_match[] = {
	{
		.compatible = "qcom,ipq806x-nand",
		.data = &ipq806x_nandc_props,
	},
	{
		.compatible = "qcom,ipq4019-nand",
		.data = &ipq4019_nandc_props,
	},
	{
		.compatible = "qcom,ipq8074-nand",
		.data = &ipq8074_nandc_props,
	},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
2907
/* platform driver glue; matching is DT-only via qcom_nandc_of_match */
static struct platform_driver qcom_nandc_driver = {
	.driver = {
		.name = "qcom-nandc",
		.of_match_table = qcom_nandc_of_match,
	},
	.probe   = qcom_nandc_probe,
	.remove  = qcom_nandc_remove,
};
module_platform_driver(qcom_nandc_driver);

MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
MODULE_LICENSE("GPL v2");