/*
2 * Copyright (C) 2007 Google, Inc.
3 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#define pr_fmt(fmt) "%s: " fmt, __func__
17
18#include <linux/slab.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/mtd/mtd.h>
22#include <linux/mtd/nand.h>
23#include <linux/mtd/partitions.h>
24#include <linux/platform_device.h>
25#include <linux/dma-mapping.h>
26#include <linux/io.h>
27#include <linux/crc16.h>
28#include <linux/bitrev.h>
29#include <linux/mutex.h>
30#include <linux/of.h>
31#include <mach/sps.h>
32
33#define PAGE_SIZE_2K 2048
34#define PAGE_SIZE_4K 4096
35#define WRITE 1
36#define READ 0
/*
 * The maximum number of descriptors per transfer (page read/write) won't
 * exceed 64. For details on the commands these descriptors carry, refer
 * to the page read and page write functions in this driver.
 */
42#define SPS_MAX_DESC_NUM 64
43#define SPS_DATA_CONS_PIPE_INDEX 0
44#define SPS_DATA_PROD_PIPE_INDEX 1
45#define SPS_CMD_CONS_PIPE_INDEX 2
46
47#define msm_virt_to_dma(chip, vaddr) \
48 ((chip)->dma_phys_addr + \
49 ((uint8_t *)(vaddr) - (chip)->dma_virt_addr))
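/*
 * Illustrative example (not part of the driver, addresses made up): if
 * the DMA pool were mapped at dma_virt_addr = 0xc8000000 with
 * dma_phys_addr = 0x48000000, then for vaddr = 0xc8000100 the macro
 * yields 0x48000000 + 0x100 = 0x48000100. Only the base-plus-offset
 * arithmetic is what the macro guarantees.
 */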
50
/*
 * A single page read/write request typically needs about 1K of DMA
 * memory, so for a single request this pool is more than enough.
 *
 * But to accommodate multiple clients we allocate 8K of memory. Though
 * only one client request can be submitted to the NANDc at any time,
 * other clients can still prepare their descriptors while waiting for
 * the current request to finish. Thus, with a total of 8K, the driver
 * can currently support up to 7 or 8 clients at a time. A client for
 * which there is no free DMA memory waits on the wait queue until other
 * clients free up the required memory.
 */
64#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
/*
 * This defines the granularity at which buffer management is done. The
 * total number of slots is based on the size (number of bits) of the
 * atomic_t variable dma_buffer_busy within struct msm_nand_chip.
 */
70#define MSM_NAND_DMA_BUFFER_SLOT_SZ \
71 (MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
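/*
 * Worked example: on a system where atomic_t holds a 32-bit counter
 * (sizeof(int) * 8 = 32 bits), the slot size works out to
 * 8192 / 32 = 256 bytes, i.e. 32 slots, each tracked by one bit of
 * dma_buffer_busy.
 */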
72
73/* ONFI(Open NAND Flash Interface) parameters */
74#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
75#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
76#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
77#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
78#define ONFI_PARAM_INFO_LENGTH 0x0200
79#define ONFI_PARAM_PAGE_LENGTH 0x0100
80#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
81#define FLASH_READ_ONFI_SIGNATURE_ADDRESS 0x20
82#define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC
83#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
84#define FLASH_READ_DEVICE_ID_ADDRESS 0x00
85
86#define MSM_NAND_RESET_FLASH_STS 0x00000020
87#define MSM_NAND_RESET_READ_STS 0x000000C0
88
89/* QPIC NANDc (NAND Controller) Register Set */
90#define MSM_NAND_REG(info, off) (info->nand_phys + off)
91#define MSM_NAND_FLASH_CMD(info) MSM_NAND_REG(info, 0x30000)
92#define MSM_NAND_ADDR0(info) MSM_NAND_REG(info, 0x30004)
93#define MSM_NAND_ADDR1(info) MSM_NAND_REG(info, 0x30008)
94#define MSM_NAND_EXEC_CMD(info) MSM_NAND_REG(info, 0x30010)
95#define MSM_NAND_FLASH_STATUS(info) MSM_NAND_REG(info, 0x30014)
96#define FS_OP_ERR (1 << 4)
97#define FS_MPU_ERR (1 << 8)
98#define FS_DEVICE_STS_ERR (1 << 16)
99#define FS_DEVICE_WP (1 << 23)
100
101#define MSM_NAND_BUFFER_STATUS(info) MSM_NAND_REG(info, 0x30018)
102#define BS_UNCORRECTABLE_BIT (1 << 8)
103#define BS_CORRECTABLE_ERR_MSK 0x1F
104
105#define MSM_NAND_DEV0_CFG0(info) MSM_NAND_REG(info, 0x30020)
106#define DISABLE_STATUS_AFTER_WRITE 4
107#define CW_PER_PAGE 6
108#define UD_SIZE_BYTES 9
109#define SPARE_SIZE_BYTES 23
110#define NUM_ADDR_CYCLES 27
111
112#define MSM_NAND_DEV0_CFG1(info) MSM_NAND_REG(info, 0x30024)
113#define DEV0_CFG1_ECC_DISABLE 0
114#define WIDE_FLASH 1
115#define NAND_RECOVERY_CYCLES 2
116#define CS_ACTIVE_BSY 5
117#define BAD_BLOCK_BYTE_NUM 6
118#define BAD_BLOCK_IN_SPARE_AREA 16
119#define WR_RD_BSY_GAP 17
120#define ENABLE_BCH_ECC 27
121
122#define MSM_NAND_DEV0_ECC_CFG(info) MSM_NAND_REG(info, 0x30028)
123#define ECC_CFG_ECC_DISABLE 0
124#define ECC_SW_RESET 1
125#define ECC_MODE 4
126#define ECC_PARITY_SIZE_BYTES 8
127#define ECC_NUM_DATA_BYTES 16
128#define ECC_FORCE_CLK_OPEN 30
129
130#define MSM_NAND_READ_ID(info) MSM_NAND_REG(info, 0x30040)
131#define MSM_NAND_READ_STATUS(info) MSM_NAND_REG(info, 0x30044)
132#define MSM_NAND_DEV_CMD1(info) MSM_NAND_REG(info, 0x300A4)
133#define MSM_NAND_DEV_CMD_VLD(info) MSM_NAND_REG(info, 0x300AC)
134#define MSM_NAND_EBI2_ECC_BUF_CFG(info) MSM_NAND_REG(info, 0x300F0)
135#define MSM_NAND_ERASED_CW_DETECT_CFG(info) MSM_NAND_REG(info, 0x300E8)
136#define MSM_NAND_ERASED_CW_DETECT_STATUS(info) MSM_NAND_REG(info, 0x300EC)
137
138#define MSM_NAND_CTRL(info) MSM_NAND_REG(info, 0x30F00)
139#define BAM_MODE_EN 0
140
141#define MSM_NAND_READ_LOCATION_0(info) MSM_NAND_REG(info, 0x30F20)
142#define MSM_NAND_READ_LOCATION_1(info) MSM_NAND_REG(info, 0x30F24)
143
144/* device commands */
145#define MSM_NAND_CMD_PAGE_READ 0x32
146#define MSM_NAND_CMD_PAGE_READ_ECC 0x33
147#define MSM_NAND_CMD_PAGE_READ_ALL 0x34
148#define MSM_NAND_CMD_PRG_PAGE 0x36
149#define MSM_NAND_CMD_PRG_PAGE_ECC 0x37
150#define MSM_NAND_CMD_PRG_PAGE_ALL 0x39
151#define MSM_NAND_CMD_BLOCK_ERASE 0x3A
152#define MSM_NAND_CMD_FETCH_ID 0x0B
153
154/* Structure that defines a NAND SPS command element */
155struct msm_nand_sps_cmd {
156 struct sps_command_element ce;
157 uint32_t flags;
158};
159
160/*
161 * Structure that defines the NAND controller properties as per the
162 * NAND flash device/chip that is attached.
163 */
164struct msm_nand_chip {
165 struct device *dev;
	/*
	 * DMA memory is allocated only once, during probe, and is shared by
	 * all NAND clients. This wait queue is needed to make clients wait
	 * for DMA memory to be freed when the entire pool is exhausted.
	 */
172 wait_queue_head_t dma_wait_queue;
173 atomic_t dma_buffer_busy;
174 uint8_t *dma_virt_addr;
175 dma_addr_t dma_phys_addr;
176 uint32_t ecc_parity_bytes;
177 uint32_t bch_caps; /* Controller BCH ECC capabilities */
178#define MSM_NAND_CAP_4_BIT_BCH (1 << 0)
179#define MSM_NAND_CAP_8_BIT_BCH (1 << 1)
180 uint32_t cw_size;
181 /* NANDc register configurations */
182 uint32_t cfg0, cfg1, cfg0_raw, cfg1_raw;
183 uint32_t ecc_buf_cfg;
184 uint32_t ecc_bch_cfg;
185};
186
187/* Structure that defines an SPS end point for a NANDc BAM pipe. */
188struct msm_nand_sps_endpt {
189 struct sps_pipe *handle;
190 struct sps_connect config;
191 struct sps_register_event event;
192 struct completion completion;
193};
194
195/*
196 * Structure that defines NANDc SPS data - BAM handle and an end point
197 * for each BAM pipe.
198 */
199struct msm_nand_sps_info {
200 uint32_t bam_handle;
201 struct msm_nand_sps_endpt data_prod;
202 struct msm_nand_sps_endpt data_cons;
203 struct msm_nand_sps_endpt cmd_pipe;
204};
205
206/*
207 * Structure that contains flash device information. This gets updated after
208 * the NAND flash device detection.
209 */
210struct flash_identification {
211 uint32_t flash_id;
212 uint32_t density;
213 uint32_t widebus;
214 uint32_t pagesize;
215 uint32_t blksize;
216 uint32_t oobsize;
217 uint32_t ecc_correctability;
218};
219
220/* Structure that defines NANDc private data. */
221struct msm_nand_info {
222 struct mtd_info mtd;
223 struct msm_nand_chip nand_chip;
224 struct msm_nand_sps_info sps;
225 unsigned long bam_phys;
226 unsigned long nand_phys;
227 void __iomem *bam_base;
228 int bam_irq;
	/*
	 * This lock must be acquired before submitting any command or data
	 * descriptors to the BAM pipes and must be held until all submitted
	 * descriptors have been processed.
	 *
	 * This is required to ensure that both command and data descriptors
	 * are submitted atomically, without interruption from other clients,
	 * when there are requests from more than one client at a time.
	 * Otherwise, the data and command descriptors of one request could
	 * be submitted out of order and cause data corruption.
	 */
240 struct mutex bam_lock;
241 struct flash_identification flash_dev;
242};
243
/* Structure that defines an ONFI parameter page (256B) */
245struct onfi_param_page {
246 uint32_t parameter_page_signature;
247 uint16_t revision_number;
248 uint16_t features_supported;
249 uint16_t optional_commands_supported;
250 uint8_t reserved0[22];
251 uint8_t device_manufacturer[12];
252 uint8_t device_model[20];
253 uint8_t jedec_manufacturer_id;
254 uint16_t date_code;
255 uint8_t reserved1[13];
256 uint32_t number_of_data_bytes_per_page;
257 uint16_t number_of_spare_bytes_per_page;
258 uint32_t number_of_data_bytes_per_partial_page;
259 uint16_t number_of_spare_bytes_per_partial_page;
260 uint32_t number_of_pages_per_block;
261 uint32_t number_of_blocks_per_logical_unit;
262 uint8_t number_of_logical_units;
263 uint8_t number_of_address_cycles;
264 uint8_t number_of_bits_per_cell;
265 uint16_t maximum_bad_blocks_per_logical_unit;
266 uint16_t block_endurance;
267 uint8_t guaranteed_valid_begin_blocks;
268 uint16_t guaranteed_valid_begin_blocks_endurance;
269 uint8_t number_of_programs_per_page;
270 uint8_t partial_program_attributes;
271 uint8_t number_of_bits_ecc_correctability;
272 uint8_t number_of_interleaved_address_bits;
273 uint8_t interleaved_operation_attributes;
274 uint8_t reserved2[13];
275 uint8_t io_pin_capacitance;
276 uint16_t timing_mode_support;
277 uint16_t program_cache_timing_mode_support;
278 uint16_t maximum_page_programming_time;
279 uint16_t maximum_block_erase_time;
280 uint16_t maximum_page_read_time;
281 uint16_t maximum_change_column_setup_time;
282 uint8_t reserved3[23];
283 uint16_t vendor_specific_revision_number;
284 uint8_t vendor_specific[88];
285 uint16_t integrity_crc;
286} __attribute__((__packed__));
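/*
 * Note that one parameter page is 256 bytes, so an ONFI_PARAM_INFO_LENGTH
 * (512 byte) read returns 512 / 256 = 2 redundant copies of it; the CRC
 * loop in msm_nand_flash_onfi_probe() below accepts the first copy whose
 * integrity_crc matches.
 */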
287
/*
 * Gets DMA memory of the requested size. Returns a pointer to free
 * memory from the allocated pool, or NULL if no free memory is
 * available.
 */
293static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
294{
295 uint32_t bitmask, free_bitmask, old_bitmask;
296 uint32_t need_mask, current_need_mask;
297 int free_index;
298
299 need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
300 - 1;
301 bitmask = atomic_read(&chip->dma_buffer_busy);
302 free_bitmask = ~bitmask;
303 do {
304 free_index = __ffs(free_bitmask);
305 current_need_mask = need_mask << free_index;
306
307 if (size + free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ >=
308 MSM_NAND_DMA_BUFFER_SIZE)
309 return NULL;
310
311 if ((bitmask & current_need_mask) == 0) {
312 old_bitmask =
313 atomic_cmpxchg(&chip->dma_buffer_busy,
314 bitmask,
315 bitmask | current_need_mask);
316 if (old_bitmask == bitmask)
317 return chip->dma_virt_addr +
318 free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ;
319 free_bitmask = 0;/* force return */
320 }
321 /* current free range was too small, clear all free bits */
322 /* below the top busy bit within current_need_mask */
323 free_bitmask &=
324 ~(~0U >> (32 - fls(bitmask & current_need_mask)));
325 } while (free_bitmask);
326
327 return NULL;
328}
329
/*
 * Returns the DMA memory in use back to the free pool and wakes up any
 * thread waiting on the wait queue for free memory to become available.
 */
334static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
335 void *buffer, size_t size)
336{
337 int index;
338 uint32_t used_mask;
339
340 used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
341 - 1;
342 index = ((uint8_t *)buffer - chip->dma_virt_addr) /
343 MSM_NAND_DMA_BUFFER_SLOT_SZ;
344 atomic_sub(used_mask << index, &chip->dma_buffer_busy);
345
346 wake_up(&chip->dma_wait_queue);
347}
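/*
 * Typical allocate/release pattern followed by the callers in this file
 * (sketch only; the layout of dma_buffer varies per operation):
 *
 *	wait_event(chip->dma_wait_queue,
 *		(dma_buffer = msm_nand_get_dma_buffer(chip,
 *					sizeof(*dma_buffer))));
 *	... prepare descriptors, submit, wait for completion ...
 *	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
 *
 * A caller that cannot get memory sleeps in wait_event() until another
 * client's release wakes the queue.
 */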
348
/*
 * Calculates the page address of the buffer passed in and the offset of
 * the buffer within that page, then maps it for DMA via dma_map_page().
 */
353static dma_addr_t msm_nand_dma_map(struct device *dev, void *addr, size_t size,
354 enum dma_data_direction dir)
355{
356 struct page *page;
357 unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
358 if (virt_addr_valid(addr))
359 page = virt_to_page(addr);
360 else {
361 if (WARN_ON(size + offset > PAGE_SIZE))
362 return ~0;
363 page = vmalloc_to_page(addr);
364 }
365 return dma_map_page(dev, page, offset, size, dir);
366}
367
/*
 * Wrapper function to prepare an SPS command element with the data
 * passed to it.
 *
 * Since every command element must carry the SPS_IOVEC_FLAG_CMD flag,
 * this function sets that flag by default, so the caller need not pass
 * it explicitly. Other flags must be passed as needed; if a command
 * element needs no other flag, 0 can be passed for flags.
 */
378static inline void msm_nand_prep_ce(struct msm_nand_sps_cmd *sps_cmd,
379 uint32_t addr, uint32_t command,
380 uint32_t data, uint32_t flags)
381{
382 struct sps_command_element *cmd = &sps_cmd->ce;
383
384 cmd->addr = addr;
385 cmd->command = (command & WRITE) ? (uint32_t) SPS_WRITE_COMMAND :
386 (uint32_t) SPS_READ_COMMAND;
387 cmd->data = data;
388 cmd->mask = 0xFFFFFFFF;
389 sps_cmd->flags = SPS_IOVEC_FLAG_CMD | flags;
390}
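/*
 * Example use (taken from the helpers below): preparing a CE that writes
 * data.addr0 into the ADDR0 register with no extra descriptor flags -
 *
 *	msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE, data.addr0, 0);
 *	cmd++;
 *
 * SPS_IOVEC_FLAG_CMD is OR'ed in implicitly by msm_nand_prep_ce().
 */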
391
/*
 * Reads a single NANDc register at the address given by addr. The return
 * value indicates whether the read was successful. The register value
 * read is stored in val.
 */
397static int msm_nand_flash_rd_reg(struct msm_nand_info *info, uint32_t addr,
398 uint32_t *val)
399{
400 int ret = 0;
401 struct msm_nand_sps_cmd *cmd;
402 struct msm_nand_chip *chip = &info->nand_chip;
403 struct {
404 struct msm_nand_sps_cmd cmd;
405 uint32_t data;
406 } *dma_buffer;
407
408 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
409 chip, sizeof(*dma_buffer))));
410 cmd = &dma_buffer->cmd;
411 msm_nand_prep_ce(cmd, addr, READ, msm_virt_to_dma(chip,
412 &dma_buffer->data), SPS_IOVEC_FLAG_INT);
413
414 ret = sps_transfer_one(info->sps.cmd_pipe.handle,
415 msm_virt_to_dma(chip, &cmd->ce),
416 sizeof(struct sps_command_element), NULL, cmd->flags);
417 if (ret) {
418 pr_err("failed to submit command %x ret %d\n", addr, ret);
419 goto out;
420 }
421 wait_for_completion_io(&info->sps.cmd_pipe.completion);
422 *val = dma_buffer->data;
423out:
424 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
425 return ret;
426}
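/*
 * Example use (from the ONFI probe below): saving the original DEV_CMD1
 * register value so it can be restored after the parameter-page read -
 *
 *	ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD1(info),
 *			&data.devcmd1_orig);
 */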
427
/*
 * Reads the flash ID from the NAND flash device. A return value < 0
 * indicates failure. On success, the flash ID is stored in the read_id
 * parameter.
 */
433static int msm_nand_flash_read_id(struct msm_nand_info *info,
434 bool read_onfi_signature,
435 uint32_t *read_id)
436{
437 int err = 0, i;
438 struct msm_nand_sps_cmd *cmd;
439 struct sps_iovec *iovec;
440 struct msm_nand_chip *chip = &info->nand_chip;
441 uint32_t total_cnt = 4;
	/*
	 * The following 4 commands are required to read the ID -
	 * write commands: addr0, flash, exec
	 * read command: read_id
	 */
447 struct {
448 struct sps_transfer xfer;
449 struct sps_iovec cmd_iovec[total_cnt];
450 struct msm_nand_sps_cmd cmd[total_cnt];
451 uint32_t data[total_cnt];
452 } *dma_buffer;
453
454 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
455 (chip, sizeof(*dma_buffer))));
456 if (read_onfi_signature)
457 dma_buffer->data[0] = FLASH_READ_ONFI_SIGNATURE_ADDRESS;
458 else
459 dma_buffer->data[0] = FLASH_READ_DEVICE_ID_ADDRESS;
460
461 dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
462 dma_buffer->data[2] = 1;
463 dma_buffer->data[3] = 0xeeeeeeee;
464
465 cmd = dma_buffer->cmd;
466 msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
467 dma_buffer->data[0], SPS_IOVEC_FLAG_LOCK);
468 cmd++;
469
470 msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
471 dma_buffer->data[1], 0);
472 cmd++;
473
474 msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
475 dma_buffer->data[2], SPS_IOVEC_FLAG_NWD);
476 cmd++;
477
478 msm_nand_prep_ce(cmd, MSM_NAND_READ_ID(info), READ,
479 msm_virt_to_dma(chip, &dma_buffer->data[3]),
480 SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
481 cmd++;
482
483 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
484 dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
485 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
486 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
487 &dma_buffer->cmd_iovec);
488 iovec = dma_buffer->xfer.iovec;
489
490 for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
491 iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
492 iovec->size = sizeof(struct sps_command_element);
493 iovec->flags = dma_buffer->cmd[i].flags;
494 iovec++;
495 }
496
497 mutex_lock(&info->bam_lock);
498 err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
499 if (err) {
500 pr_err("Failed to submit commands %d\n", err);
501 mutex_unlock(&info->bam_lock);
502 goto out;
503 }
504 wait_for_completion_io(&info->sps.cmd_pipe.completion);
505 mutex_unlock(&info->bam_lock);
506
507 pr_debug("Read ID register value 0x%x\n", dma_buffer->data[3]);
508 if (!read_onfi_signature)
509 pr_debug("nandid: %x maker %02x device %02x\n",
510 dma_buffer->data[3], dma_buffer->data[3] & 0xff,
511 (dma_buffer->data[3] >> 8) & 0xff);
512 *read_id = dma_buffer->data[3];
513out:
514 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
515 return err;
516}
517
518/*
519 * Contains data for common configuration registers that must be programmed
520 * for every NANDc operation.
521 */
522struct msm_nand_common_cfgs {
523 uint32_t cmd;
524 uint32_t addr0;
525 uint32_t addr1;
526 uint32_t cfg0;
527 uint32_t cfg1;
528};
529
/*
 * Function to prepare SPS command elements that write the NANDc
 * configuration registers with the data defined in struct
 * msm_nand_common_cfgs. This is required for the following NANDc
 * operations - erase, bad block checking and reading the ONFI
 * parameter page.
 */
536static void msm_nand_prep_cfg_cmd_desc(struct msm_nand_info *info,
537 struct msm_nand_common_cfgs data,
538 struct msm_nand_sps_cmd **curr_cmd)
539{
540 struct msm_nand_sps_cmd *cmd;
541
542 cmd = *curr_cmd;
543 msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data.cmd,
544 SPS_IOVEC_FLAG_LOCK);
545 cmd++;
546
547 msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE, data.addr0, 0);
548 cmd++;
549
550 msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE, data.addr1, 0);
551 cmd++;
552
553 msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE, data.cfg0, 0);
554 cmd++;
555
556 msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE, data.cfg1, 0);
557 cmd++;
558 *curr_cmd = cmd;
559}
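/*
 * Typical use (see msm_nand_flash_onfi_probe() and msm_nand_erase()
 * below): the caller points curr_cmd at its command array and lets this
 * helper advance it past the five common configuration CEs -
 *
 *	curr_cmd = cmd = dma_buffer->cmd;
 *	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
 *	cmd = curr_cmd;
 */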
560
/*
 * Function to perform the CRC integrity check on the ONFI parameter page
 * that was read. The controller ECC is disabled for the parameter-page
 * read, so the CRC must be computed manually and checked against the
 * value stored within the ONFI page.
 */
567static uint16_t msm_nand_flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
568{
569 int i;
570 uint16_t result;
571
572 for (i = 0; i < count; i++)
573 buffer[i] = bitrev8(buffer[i]);
574
575 result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
576
577 for (i = 0; i < count; i++)
578 buffer[i] = bitrev8(buffer[i]);
579
580 return result;
581}
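/*
 * Background on the bit reversals above: ONFI specifies a CRC-16 with
 * polynomial 0x8005 and initial value 0x4F4E, computed MSB-first, while
 * the kernel's crc16() implements the reflected (LSB-first, polynomial
 * 0xA001) form. Reversing the bits of the input bytes, of the seed and
 * of the result converts between the two conventions. The caller below
 * compares the computed value against the integrity_crc field:
 *
 *	msm_nand_flash_onfi_crc_check((uint8_t *)onfi_param_page_ptr,
 *			ONFI_PARAM_PAGE_LENGTH - 2) ==
 *			onfi_param_page_ptr->integrity_crc
 */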
582
/*
 * Structure that contains NANDc register data for the commands required
 * for reading the ONFI parameter page.
 */
587struct msm_nand_flash_onfi_data {
588 struct msm_nand_common_cfgs cfg;
589 uint32_t exec;
590 uint32_t devcmd1_orig;
591 uint32_t devcmdvld_orig;
592 uint32_t devcmd1_mod;
593 uint32_t devcmdvld_mod;
594 uint32_t ecc_bch_cfg;
595};
596
/*
 * Function to identify whether the attached NAND flash device is
 * compliant with the ONFI spec. If it is, the ONFI parameter page is
 * read to obtain the device parameters.
 */
602static int msm_nand_flash_onfi_probe(struct msm_nand_info *info)
603{
604 struct msm_nand_chip *chip = &info->nand_chip;
605 struct flash_identification *flash = &info->flash_dev;
606 uint32_t crc_chk_count = 0, page_address = 0;
607 int ret = 0, i;
608
609 /* SPS parameters */
610 struct msm_nand_sps_cmd *cmd, *curr_cmd;
611 struct sps_iovec *iovec;
612 uint32_t rdata;
613
614 /* ONFI Identifier/Parameter Page parameters */
615 uint8_t *onfi_param_info_buf = NULL;
616 dma_addr_t dma_addr_param_info = 0;
617 struct onfi_param_page *onfi_param_page_ptr;
618 struct msm_nand_flash_onfi_data data;
619 uint32_t onfi_signature;
620
621 /* SPS command/data descriptors */
622 uint32_t total_cnt = 13;
623 /*
624 * The following 13 commands are required to get onfi parameters -
625 * flash, addr0, addr1, cfg0, cfg1, dev0_ecc_cfg, cmd_vld, dev_cmd1,
626 * read_loc_0, exec, flash_status (read cmd), dev_cmd1, cmd_vld.
627 */
628 struct {
629 struct sps_transfer xfer;
630 struct sps_iovec cmd_iovec[total_cnt];
631 struct msm_nand_sps_cmd cmd[total_cnt];
632 uint32_t flash_status;
633 } *dma_buffer;
634
635 wait_event(chip->dma_wait_queue, (onfi_param_info_buf =
636 msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
637 dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);
638
639 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
640 (chip, sizeof(*dma_buffer))));
641
642 ret = msm_nand_flash_read_id(info, 1, &onfi_signature);
643 if (ret < 0) {
644 pr_err("Failed to read ONFI signature\n");
645 goto free_dma;
646 }
647 if (onfi_signature != ONFI_PARAMETER_PAGE_SIGNATURE) {
648 pr_info("Found a non ONFI device\n");
649 ret = -EIO;
650 goto free_dma;
651 }
652
653 memset(&data, 0, sizeof(struct msm_nand_flash_onfi_data));
654 ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD1(info),
655 &data.devcmd1_orig);
656 if (ret < 0)
657 goto free_dma;
658 ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD_VLD(info),
659 &data.devcmdvld_orig);
660 if (ret < 0)
661 goto free_dma;
662
663 data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
664 data.exec = 1;
665 data.cfg.addr0 = (page_address << 16) |
666 FLASH_READ_ONFI_PARAMETERS_ADDRESS;
667 data.cfg.addr1 = (page_address >> 16) & 0xFF;
668 data.cfg.cfg0 = MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO;
669 data.cfg.cfg1 = MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO;
670 data.devcmd1_mod = (data.devcmd1_orig & 0xFFFFFF00) |
671 FLASH_READ_ONFI_PARAMETERS_COMMAND;
672 data.devcmdvld_mod = data.devcmdvld_orig & 0xFFFFFFFE;
673 data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
674 dma_buffer->flash_status = 0xeeeeeeee;
675
676 curr_cmd = cmd = dma_buffer->cmd;
677 msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
678
679 cmd = curr_cmd;
680 msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
681 data.ecc_bch_cfg, 0);
682 cmd++;
683
684 msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE,
685 data.devcmdvld_mod, 0);
686 cmd++;
687
688 msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE,
689 data.devcmd1_mod, 0);
690 cmd++;
691
692 rdata = (0 << 0) | (ONFI_PARAM_INFO_LENGTH << 16) | (1 << 31);
693 msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
694 rdata, 0);
695 cmd++;
696
697 msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
698 data.exec, SPS_IOVEC_FLAG_NWD);
699 cmd++;
700
701 msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
702 msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
703 cmd++;
704
705 msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE,
706 data.devcmd1_orig, 0);
707 cmd++;
708
709 msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE,
710 data.devcmdvld_orig,
711 SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
712 cmd++;
713
714 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
715 dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
716 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
717 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
718 &dma_buffer->cmd_iovec);
719 iovec = dma_buffer->xfer.iovec;
720
721 for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
722 iovec->addr = msm_virt_to_dma(chip,
723 &dma_buffer->cmd[i].ce);
724 iovec->size = sizeof(struct sps_command_element);
725 iovec->flags = dma_buffer->cmd[i].flags;
726 iovec++;
727 }
728 mutex_lock(&info->bam_lock);
729 /* Submit data descriptor */
730 ret = sps_transfer_one(info->sps.data_prod.handle, dma_addr_param_info,
731 ONFI_PARAM_INFO_LENGTH, NULL, SPS_IOVEC_FLAG_INT);
732 if (ret) {
733 pr_err("Failed to submit data descriptors %d\n", ret);
734 mutex_unlock(&info->bam_lock);
735 goto free_dma;
736 }
737 /* Submit command descriptors */
738 ret = sps_transfer(info->sps.cmd_pipe.handle,
739 &dma_buffer->xfer);
740 if (ret) {
741 pr_err("Failed to submit commands %d\n", ret);
742 mutex_unlock(&info->bam_lock);
743 goto free_dma;
744 }
745 wait_for_completion_io(&info->sps.cmd_pipe.completion);
746 wait_for_completion_io(&info->sps.data_prod.completion);
747 mutex_unlock(&info->bam_lock);
748
749 /* Check for flash status errors */
750 if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
751 pr_err("MPU/OP err (0x%x) is set\n", dma_buffer->flash_status);
752 ret = -EIO;
753 goto free_dma;
754 }
755
756 for (crc_chk_count = 0; crc_chk_count < ONFI_PARAM_INFO_LENGTH
757 / ONFI_PARAM_PAGE_LENGTH; crc_chk_count++) {
758 onfi_param_page_ptr =
759 (struct onfi_param_page *)
760 (&(onfi_param_info_buf
761 [ONFI_PARAM_PAGE_LENGTH *
762 crc_chk_count]));
763 if (msm_nand_flash_onfi_crc_check(
764 (uint8_t *)onfi_param_page_ptr,
765 ONFI_PARAM_PAGE_LENGTH - 2) ==
766 onfi_param_page_ptr->integrity_crc) {
767 break;
768 }
769 }
770 if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
771 / ONFI_PARAM_PAGE_LENGTH) {
772 pr_err("CRC Check failed on param page\n");
773 ret = -EIO;
774 goto free_dma;
775 }
776 ret = msm_nand_flash_read_id(info, 0, &flash->flash_id);
777 if (ret < 0) {
778 pr_err("Failed to read flash ID\n");
779 goto free_dma;
780 }
781 flash->widebus = onfi_param_page_ptr->features_supported & 0x01;
782 flash->pagesize = onfi_param_page_ptr->number_of_data_bytes_per_page;
783 flash->blksize = onfi_param_page_ptr->number_of_pages_per_block *
784 flash->pagesize;
785 flash->oobsize = onfi_param_page_ptr->number_of_spare_bytes_per_page;
786 flash->density = onfi_param_page_ptr->number_of_blocks_per_logical_unit
787 * flash->blksize;
788 flash->ecc_correctability = onfi_param_page_ptr->
789 number_of_bits_ecc_correctability;
790
791 pr_info("Found an ONFI compliant device %s\n",
792 onfi_param_page_ptr->device_model);
	/*
	 * Temporary hack for the MT29F4G08ABC device. Since the device does
	 * not properly adhere to the ONFI specification, it reports itself
	 * as a 16-bit device even though it is an 8-bit device.
	 */
799 if (!strncmp(onfi_param_page_ptr->device_model, "MT29F4G08ABC", 12))
800 flash->widebus = 0;
801free_dma:
802 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
803 msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
804 ONFI_PARAM_INFO_LENGTH);
805 return ret;
806}
807
808/*
809 * Structure that contains read/write parameters required for reading/writing
810 * from/to a page.
811 */
812struct msm_nand_rw_params {
813 uint32_t page;
814 uint32_t page_count;
815 uint32_t sectordatasize;
816 uint32_t sectoroobsize;
817 uint32_t cwperpage;
818 uint32_t oob_len_cmd;
819 uint32_t oob_len_data;
820 uint32_t start_sector;
821 uint32_t oob_col;
822 dma_addr_t data_dma_addr;
823 dma_addr_t oob_dma_addr;
824 dma_addr_t data_dma_addr_curr;
825 dma_addr_t oob_dma_addr_curr;
826 bool read;
827};
828
829/*
830 * Structure that contains NANDc register data required for reading/writing
831 * from/to a page.
832 */
833struct msm_nand_rw_reg_data {
834 uint32_t cmd;
835 uint32_t addr0;
836 uint32_t addr1;
837 uint32_t cfg0;
838 uint32_t cfg1;
839 uint32_t ecc_bch_cfg;
840 uint32_t exec;
841 uint32_t ecc_cfg;
842 uint32_t clrfstatus;
843 uint32_t clrrstatus;
844};
845
/*
 * Function that validates the page read/write MTD parameters received
 * from upper layers such as MTD/YAFFS2 and returns an error for any
 * operation not supported by the driver. On success, it also maps the
 * data and OOB buffers received for DMA.
 */
852static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read,
853 loff_t offset,
854 struct mtd_oob_ops *ops,
855 struct msm_nand_rw_params *args)
856{
857 struct msm_nand_info *info = mtd->priv;
858 struct msm_nand_chip *chip = &info->nand_chip;
859 int err = 0;
860
861 pr_debug("========================================================\n");
862 pr_debug("offset 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x\n",
863 offset, ops->mode, ops->datbuf, ops->len);
864 pr_debug("oobbuf 0x%p ooblen 0x%x\n", ops->oobbuf, ops->ooblen);
865
866 if (ops->mode == MTD_OPS_PLACE_OOB) {
867 pr_err("MTD_OPS_PLACE_OOB is not supported\n");
868 err = -EINVAL;
869 goto out;
870 }
871
872 if (mtd->writesize == PAGE_SIZE_2K)
873 args->page = offset >> 11;
874
875 if (mtd->writesize == PAGE_SIZE_4K)
876 args->page = offset >> 12;
877
878 args->oob_len_cmd = ops->ooblen;
879 args->oob_len_data = ops->ooblen;
880 args->cwperpage = (mtd->writesize >> 9);
	args->read = read;
882
883 if (offset & (mtd->writesize - 1)) {
884 pr_err("unsupported offset 0x%llx\n", offset);
885 err = -EINVAL;
886 goto out;
887 }
888
889 if (!read && !ops->datbuf) {
890 pr_err("No data buffer provided for write!!\n");
891 err = -EINVAL;
892 goto out;
893 }
894
895 if (ops->mode == MTD_OPS_RAW) {
896 if (!ops->datbuf) {
897 pr_err("No data buffer provided for RAW mode\n");
898 err = -EINVAL;
899 goto out;
900 } else if ((ops->len % (mtd->writesize +
901 mtd->oobsize)) != 0) {
902 pr_err("unsupported data len %d for RAW mode\n",
903 ops->len);
904 err = -EINVAL;
905 goto out;
906 }
907 args->page_count = ops->len / (mtd->writesize + mtd->oobsize);
908
909 } else if (ops->mode == MTD_OPS_AUTO_OOB) {
910 if (ops->datbuf && (ops->len % mtd->writesize) != 0) {
911 /* when ops->datbuf is NULL, ops->len can be ooblen */
912 pr_err("unsupported data len %d for AUTO mode\n",
913 ops->len);
914 err = -EINVAL;
915 goto out;
916 }
917 if (read && ops->oobbuf && !ops->datbuf) {
918 args->start_sector = args->cwperpage - 1;
919 args->page_count = ops->ooblen / mtd->oobavail;
920 if ((args->page_count == 0) && (ops->ooblen))
921 args->page_count = 1;
922 } else if (ops->datbuf) {
923 args->page_count = ops->len / mtd->writesize;
924 }
925 }
926
927 if (ops->datbuf) {
928 args->data_dma_addr_curr = args->data_dma_addr =
929 msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
930 (read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
931 if (dma_mapping_error(chip->dev, args->data_dma_addr)) {
932 pr_err("dma mapping failed for 0x%p\n", ops->datbuf);
933 err = -EIO;
934 goto out;
935 }
936 }
937 if (ops->oobbuf) {
938 if (read)
939 memset(ops->oobbuf, 0xFF, ops->ooblen);
940 args->oob_dma_addr_curr = args->oob_dma_addr =
941 msm_nand_dma_map(chip->dev, ops->oobbuf, ops->ooblen,
942 (read ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE));
943 if (dma_mapping_error(chip->dev, args->oob_dma_addr)) {
944 pr_err("dma mapping failed for 0x%p\n", ops->oobbuf);
945 err = -EIO;
946 goto dma_map_oobbuf_failed;
947 }
948 }
949 goto out;
950dma_map_oobbuf_failed:
951 if (ops->datbuf)
952 dma_unmap_page(chip->dev, args->data_dma_addr, ops->len,
953 (read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
954out:
955 return err;
956}
957
958/*
959 * Function that updates NANDc register data (struct msm_nand_rw_reg_data)
960 * required for page read/write.
961 */
962static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip,
963 struct mtd_oob_ops *ops,
964 struct msm_nand_rw_params *args,
965 struct msm_nand_rw_reg_data *data)
966{
967 if (args->read) {
968 if (ops->mode != MTD_OPS_RAW) {
969 data->cmd = MSM_NAND_CMD_PAGE_READ_ECC;
970 data->cfg0 =
971 (chip->cfg0 & ~(7U << CW_PER_PAGE)) |
972 (((args->cwperpage-1) - args->start_sector)
973 << CW_PER_PAGE);
974 data->cfg1 = chip->cfg1;
975 data->ecc_bch_cfg = chip->ecc_bch_cfg;
976 } else {
977 data->cmd = MSM_NAND_CMD_PAGE_READ_ALL;
978 data->cfg0 = chip->cfg0_raw;
979 data->cfg1 = chip->cfg1_raw;
980 data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
981 }
982
983 } else {
984 if (ops->mode != MTD_OPS_RAW) {
985 data->cfg0 = chip->cfg0;
986 data->cfg1 = chip->cfg1;
987 data->ecc_bch_cfg = chip->ecc_bch_cfg;
988 } else {
989 data->cfg0 = chip->cfg0_raw;
990 data->cfg1 = chip->cfg1_raw;
991 data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
992 }
993 data->cmd = MSM_NAND_CMD_PRG_PAGE;
994 data->clrfstatus = MSM_NAND_RESET_FLASH_STS;
995 data->clrrstatus = MSM_NAND_RESET_READ_STS;
996 }
997 data->exec = 1;
998 data->ecc_cfg = chip->ecc_buf_cfg;
999}
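/*
 * Worked example of the CW_PER_PAGE arithmetic above for a 2K page
 * (cwperpage = 4): a full-page ECC read uses start_sector = 0, so the
 * field is programmed with (4 - 1) - 0 = 3, i.e. four codewords, while
 * an OOB-only read uses start_sector = 3, so the field becomes 0 and
 * only the last codeword is read.
 */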
1000
1001/*
1002 * Function to prepare series of SPS command descriptors required for a page
1003 * read/write operation.
1004 */
1005static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops,
1006 struct msm_nand_rw_params *args,
1007 struct msm_nand_rw_reg_data *data,
1008 struct msm_nand_info *info,
1009 uint32_t curr_cw,
1010 struct msm_nand_sps_cmd **curr_cmd)
1011{
1012 struct msm_nand_chip *chip = &info->nand_chip;
1013 struct msm_nand_sps_cmd *cmd;
1014 uint32_t rdata;
1015 /* read_location register parameters */
1016 uint32_t offset, size, last_read;
1017
1018 cmd = *curr_cmd;
1019 msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data->cmd,
1020 ((curr_cw == args->start_sector) ?
1021 SPS_IOVEC_FLAG_LOCK : 0));
1022 cmd++;
1023
1024 if (curr_cw == args->start_sector) {
1025 msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
1026 data->addr0, 0);
1027 cmd++;
1028
1029 msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE,
1030 data->addr1, 0);
1031 cmd++;
1032
1033 msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE,
1034 data->cfg0, 0);
1035 cmd++;
1036
1037 msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE,
1038 data->cfg1, 0);
1039 cmd++;
1040
1041 msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
1042 data->ecc_bch_cfg, 0);
1043 cmd++;
1044
1045 msm_nand_prep_ce(cmd, MSM_NAND_EBI2_ECC_BUF_CFG(info),
1046 WRITE, data->ecc_cfg, 0);
1047 cmd++;
1048 }
1049
1050 if (!args->read)
1051 goto sub_exec_cmd;
1052
1053 if (ops->mode == MTD_OPS_RAW) {
1054 rdata = (0 << 0) | (chip->cw_size << 16) | (1 << 31);
1055 msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
1056 rdata, 0);
1057 cmd++;
1058 }
1059 if (ops->mode == MTD_OPS_AUTO_OOB && ops->datbuf) {
1060 offset = 0;
1061 size = (curr_cw < (args->cwperpage - 1)) ? 516 :
1062 (512 - ((args->cwperpage - 1) << 2));
1063 last_read = (curr_cw < (args->cwperpage - 1)) ? 1 :
1064 (ops->oobbuf ? 0 : 1);
1065 rdata = (offset << 0) | (size << 16) | (last_read << 31);
1066 msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
1067 rdata, 0);
1068 cmd++;
1069 }
1070 if (ops->mode == MTD_OPS_AUTO_OOB && ops->oobbuf
1071 && (curr_cw == (args->cwperpage - 1))) {
1072 offset = 512 - ((args->cwperpage - 1) << 2);
1073 size = (args->cwperpage) << 2;
1074 if (size > args->oob_len_cmd)
1075 size = args->oob_len_cmd;
1076 args->oob_len_cmd -= size;
1077 last_read = 1;
1078 rdata = (offset << 0) | (size << 16) | (last_read << 31);
1079 if (ops->datbuf) {
1080 msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_1(info),
1081 WRITE, rdata, 0);
1082 } else {
1083 msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info),
1084 WRITE, rdata, 0);
1085 }
1086 cmd++;
1087 }
1088sub_exec_cmd:
1089 msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data->exec,
1090 SPS_IOVEC_FLAG_NWD);
1091 cmd++;
1092 *curr_cmd = cmd;
1093}
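/*
 * Worked example of the read_location programming above, again for a 2K
 * page (cwperpage = 4) in MTD_OPS_AUTO_OOB mode with both datbuf and
 * oobbuf set: codewords 0-2 each read 516 data bytes from offset 0;
 * codeword 3 reads 512 - (3 << 2) = 500 data bytes via read_location_0
 * and up to (4 << 2) = 16 OOB bytes from offset 500 via read_location_1,
 * with last_read set on the OOB location.
 */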
1094
1095/*
1096 * Function to prepare and submit SPS data descriptors required for a page
1097 * read/write operation.
1098 */
1099static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops,
1100 struct msm_nand_rw_params *args,
1101 struct msm_nand_info *info,
1102 uint32_t curr_cw)
1103{
1104 struct msm_nand_chip *chip = &info->nand_chip;
1105 struct sps_pipe *data_pipe_handle;
1106 uint32_t sectordatasize, sectoroobsize;
1107 uint32_t sps_flags = 0;
1108 int err = 0;
1109
1110 if (args->read)
1111 data_pipe_handle = info->sps.data_prod.handle;
1112 else
1113 data_pipe_handle = info->sps.data_cons.handle;
1114
1115 if (ops->mode == MTD_OPS_RAW) {
1116 sectordatasize = chip->cw_size;
1117 if (!args->read)
1118 sps_flags = SPS_IOVEC_FLAG_EOT;
1119 if (curr_cw == (args->cwperpage - 1))
1120 sps_flags |= SPS_IOVEC_FLAG_INT;
1121
1122 err = sps_transfer_one(data_pipe_handle,
1123 args->data_dma_addr_curr,
1124 sectordatasize, NULL,
1125 sps_flags);
1126 if (err)
1127 goto out;
1128 args->data_dma_addr_curr += sectordatasize;
1129
1130 } else if (ops->mode == MTD_OPS_AUTO_OOB) {
1131 if (ops->datbuf) {
1132 sectordatasize = (curr_cw < (args->cwperpage - 1))
1133 ? 516 : (512 - ((args->cwperpage - 1) << 2));
1134
1135 if (!args->read) {
1136 sps_flags = SPS_IOVEC_FLAG_EOT;
1137 if (curr_cw == (args->cwperpage - 1) &&
1138 ops->oobbuf)
1139 sps_flags = 0;
1140 }
1141 if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf)
1142 sps_flags |= SPS_IOVEC_FLAG_INT;
1143
1144 err = sps_transfer_one(data_pipe_handle,
1145 args->data_dma_addr_curr,
1146 sectordatasize, NULL,
1147 sps_flags);
1148 if (err)
1149 goto out;
1150 args->data_dma_addr_curr += sectordatasize;
1151 }
1152
1153 if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) {
1154 sectoroobsize = args->cwperpage << 2;
1155 if (sectoroobsize > args->oob_len_data)
1156 sectoroobsize = args->oob_len_data;
1157
1158 if (!args->read)
1159 sps_flags |= SPS_IOVEC_FLAG_EOT;
1160 sps_flags |= SPS_IOVEC_FLAG_INT;
1161 err = sps_transfer_one(data_pipe_handle,
1162 args->oob_dma_addr_curr,
1163 sectoroobsize, NULL,
1164 sps_flags);
1165 if (err)
1166 goto out;
1167 args->oob_dma_addr_curr += sectoroobsize;
1168 args->oob_len_data -= sectoroobsize;
1169 }
1170 }
1171out:
1172 return err;
1173}
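/*
 * The descriptor flags above follow one rule of thumb: writes terminate
 * each codeword's data with SPS_IOVEC_FLAG_EOT, and whichever descriptor
 * is the last one of the page carries SPS_IOVEC_FLAG_INT so that only a
 * single interrupt is raised per page. For instance, a raw-mode write of
 * the final codeword is submitted with EOT | INT, while a raw-mode read
 * of the same codeword uses INT alone.
 */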
1174
/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to read
 * a page with main and/or spare data.
 */
1179static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
1180 struct mtd_oob_ops *ops)
1181{
1182 struct msm_nand_info *info = mtd->priv;
1183 struct msm_nand_chip *chip = &info->nand_chip;
1184 uint32_t cwperpage = (mtd->writesize >> 9);
1185 int err, pageerr = 0, rawerr = 0;
1186 uint32_t n = 0, pages_read = 0;
1187 uint32_t ecc_errors = 0, total_ecc_errors = 0;
1188 struct msm_nand_rw_params rw_params;
1189 struct msm_nand_rw_reg_data data;
1190 struct msm_nand_sps_cmd *cmd, *curr_cmd;
1191 struct sps_iovec *iovec;
1192 /*
1193 * The following 6 commands will be sent only once for the first
1194 * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
1195 * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The following 6 commands will
1196 * be sent for every CW - flash, read_location_0, read_location_1,
1197 * exec, flash_status and buffer_status.
1198 */
1199 uint32_t total_cnt = (6 * cwperpage) + 6;
1200 struct {
1201 struct sps_transfer xfer;
1202 struct sps_iovec cmd_iovec[total_cnt];
1203 struct msm_nand_sps_cmd cmd[total_cnt];
1204 struct {
1205 uint32_t flash_status;
1206 uint32_t buffer_status;
1207 } result[cwperpage];
1208 } *dma_buffer;
1209
1210 memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
1211 err = msm_nand_validate_mtd_params(mtd, true, from, ops, &rw_params);
1212 if (err)
1213 goto validate_mtd_params_failed;
1214
1215 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
1216 chip, sizeof(*dma_buffer))));
1217
1218 rw_params.oob_col = rw_params.start_sector * chip->cw_size;
1219 if (chip->cfg1 & (1 << WIDE_FLASH))
1220 rw_params.oob_col >>= 1;
1221
1222 memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
1223 msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
1224
1225 while (rw_params.page_count-- > 0) {
1226 data.addr0 = (rw_params.page << 16) | rw_params.oob_col;
1227 data.addr1 = (rw_params.page >> 16) & 0xff;
1228 cmd = dma_buffer->cmd;
1229 for (n = rw_params.start_sector; n < cwperpage; n++) {
1230 dma_buffer->result[n].flash_status = 0xeeeeeeee;
1231 dma_buffer->result[n].buffer_status = 0xeeeeeeee;
1232
1233 curr_cmd = cmd;
1234 msm_nand_prep_rw_cmd_desc(ops, &rw_params,
1235 &data, info, n, &curr_cmd);
1236
1237 cmd = curr_cmd;
1238 msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
1239 READ, msm_virt_to_dma(chip,
1240 &dma_buffer->result[n].flash_status), 0);
1241 cmd++;
1242
1243 msm_nand_prep_ce(cmd, MSM_NAND_BUFFER_STATUS(info),
1244 READ, msm_virt_to_dma(chip,
1245 &dma_buffer->result[n].buffer_status),
1246 ((n == (cwperpage - 1)) ?
1247 (SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT) :
1248 0));
1249 cmd++;
1250 }
1251
1252 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
1253 dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
1254 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
1255 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
1256 &dma_buffer->cmd_iovec);
1257 iovec = dma_buffer->xfer.iovec;
1258
1259 for (n = 0; n < dma_buffer->xfer.iovec_count; n++) {
1260 iovec->addr = msm_virt_to_dma(chip,
1261 &dma_buffer->cmd[n].ce);
1262 iovec->size = sizeof(struct sps_command_element);
1263 iovec->flags = dma_buffer->cmd[n].flags;
1264 iovec++;
1265 }
1266 mutex_lock(&info->bam_lock);
1267 /* Submit data descriptors */
1268 for (n = rw_params.start_sector; n < cwperpage; n++) {
1269 err = msm_nand_submit_rw_data_desc(ops,
1270 &rw_params, info, n);
1271 if (err) {
1272 pr_err("Failed to submit data descs %d\n", err);
1273 mutex_unlock(&info->bam_lock);
1274 goto free_dma;
1275 }
1276 }
1277 /* Submit command descriptors */
1278 err = sps_transfer(info->sps.cmd_pipe.handle,
1279 &dma_buffer->xfer);
1280 if (err) {
1281 pr_err("Failed to submit commands %d\n", err);
1282 mutex_unlock(&info->bam_lock);
1283 goto free_dma;
1284 }
1285 wait_for_completion_io(&info->sps.cmd_pipe.completion);
1286 wait_for_completion_io(&info->sps.data_prod.completion);
1287 mutex_unlock(&info->bam_lock);
1288 /* Check for flash status errors */
1289 pageerr = rawerr = 0;
1290 for (n = rw_params.start_sector; n < cwperpage; n++) {
1291 if (dma_buffer->result[n].flash_status & (FS_OP_ERR |
1292 FS_MPU_ERR)) {
1293 rawerr = -EIO;
1294 break;
1295 }
1296 }
1297 /* Check for ECC correction on empty block */
1298 if (rawerr && ops->datbuf && ops->mode != MTD_OPS_RAW) {
1299 uint8_t *datbuf = ops->datbuf +
1300 pages_read * mtd->writesize;
1301
1302 dma_sync_single_for_cpu(chip->dev,
1303 rw_params.data_dma_addr_curr - mtd->writesize,
1304 mtd->writesize, DMA_BIDIRECTIONAL);
1305
1306 for (n = 0; n < mtd->writesize; n++) {
1307 /* TODO: check offset for 4bit BCHECC */
1308 if ((n % 516 == 3 || n % 516 == 175)
1309 && datbuf[n] == 0x54)
1310 datbuf[n] = 0xff;
1311 if (datbuf[n] != 0xff) {
1312 pageerr = rawerr;
1313 break;
1314 }
1315 }
1316
1317 dma_sync_single_for_device(chip->dev,
1318 rw_params.data_dma_addr_curr - mtd->writesize,
1319 mtd->writesize, DMA_BIDIRECTIONAL);
1320 }
1321 if (rawerr && ops->oobbuf) {
1322 dma_sync_single_for_cpu(chip->dev,
1323 rw_params.oob_dma_addr_curr - (ops->ooblen -
1324 rw_params.oob_len_data),
1325 ops->ooblen - rw_params.oob_len_data,
1326 DMA_BIDIRECTIONAL);
1327
1328 for (n = 0; n < ops->ooblen; n++) {
1329 if (ops->oobbuf[n] != 0xff) {
1330 pageerr = rawerr;
1331 break;
1332 }
1333 }
1334
1335 dma_sync_single_for_device(chip->dev,
1336 rw_params.oob_dma_addr_curr - (ops->ooblen -
1337 rw_params.oob_len_data),
1338 ops->ooblen - rw_params.oob_len_data,
1339 DMA_BIDIRECTIONAL);
1340 }
1341 /* check for uncorrectable errors */
1342 if (pageerr) {
1343 for (n = rw_params.start_sector; n < cwperpage; n++) {
1344 if (dma_buffer->result[n].buffer_status &
1345 BS_UNCORRECTABLE_BIT) {
1346 mtd->ecc_stats.failed++;
1347 pageerr = -EBADMSG;
1348 break;
1349 }
1350 }
1351 }
1352 /* check for correctable errors */
1353 if (!rawerr) {
1354 for (n = rw_params.start_sector; n < cwperpage; n++) {
1355 ecc_errors =
1356 dma_buffer->result[n].buffer_status
1357 & BS_CORRECTABLE_ERR_MSK;
1358 if (ecc_errors) {
1359 total_ecc_errors += ecc_errors;
1360 mtd->ecc_stats.corrected += ecc_errors;
					/*
					 * For Micron devices it is observed
					 * that correctable errors up to
					 * 3 bits are very common.
					 */
1366 if (ecc_errors > 3)
1367 pageerr = -EUCLEAN;
1368 }
1369 }
1370 }
1371 if (pageerr && (pageerr != -EUCLEAN || err == 0))
1372 err = pageerr;
1373
1374 if (rawerr && !pageerr) {
1375 pr_debug("%llx %x %x empty page\n",
1376 (loff_t)rw_params.page * mtd->writesize,
1377 ops->len, ops->ooblen);
1378 } else {
1379 for (n = rw_params.start_sector; n < cwperpage; n++)
1380 pr_debug("cw %d: flash_sts %x buffr_sts %x\n",
1381 n, dma_buffer->result[n].flash_status,
1382 dma_buffer->result[n].buffer_status);
1383 }
1384 if (err && err != -EUCLEAN && err != -EBADMSG)
1385 goto free_dma;
1386 pages_read++;
1387 rw_params.page++;
1388 }
1389free_dma:
1390 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
1391 if (ops->oobbuf)
1392 dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
1393 ops->ooblen, DMA_FROM_DEVICE);
1394 if (ops->datbuf)
1395 dma_unmap_page(chip->dev, rw_params.data_dma_addr,
1396 ops->len, DMA_BIDIRECTIONAL);
1397validate_mtd_params_failed:
1398 if (ops->mode != MTD_OPS_RAW)
1399 ops->retlen = mtd->writesize * pages_read;
1400 else
1401 ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read;
1402 ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
1403 if (err)
1404 pr_err("0x%llx datalen 0x%x ooblen %x err %d corrected %d\n",
1405 from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
1406 total_ecc_errors);
1407 pr_debug("ret %d, retlen %d oobretlen %d\n",
1408 err, ops->retlen, ops->oobretlen);
1409
1410 pr_debug("========================================================\n");
1411 return err;
1412}
1413
1414/*
1415 * Function that gets called from upper layers such as MTD/YAFFS2 to read a
1416 * page with only main data.
1417 */
1418static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1419 size_t *retlen, u_char *buf)
1420{
1421 int ret;
1422 struct mtd_oob_ops ops;
1423
1424 ops.mode = MTD_OPS_PLACE_OOB;
1425 ops.len = len;
1426 ops.retlen = 0;
1427 ops.ooblen = 0;
1428 ops.datbuf = buf;
1429 ops.oobbuf = NULL;
1430 ret = msm_nand_read_oob(mtd, from, &ops);
1431 *retlen = ops.retlen;
1432 return ret;
1433}
1434
1435/*
1436 * Function that gets called from upper layers such as MTD/YAFFS2 to write a
1437 * page with both main and spare data.
1438 */
1439static int msm_nand_write_oob(struct mtd_info *mtd, loff_t to,
1440 struct mtd_oob_ops *ops)
1441{
1442 struct msm_nand_info *info = mtd->priv;
1443 struct msm_nand_chip *chip = &info->nand_chip;
1444 uint32_t cwperpage = (mtd->writesize >> 9);
1445 uint32_t n, flash_sts, pages_written = 0;
1446 int err = 0;
1447 struct msm_nand_rw_params rw_params;
1448 struct msm_nand_rw_reg_data data;
1449 struct msm_nand_sps_cmd *cmd, *curr_cmd;
1450 struct sps_iovec *iovec;
	/*
	 * The following 7 commands will be sent only once:
	 * For the first codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
	 * dev0_ecc_cfg, ebi2_ecc_buf_cfg.
	 * For the last codeword (CW) - read_status (write).
	 *
	 * The following 4 commands will be sent for every CW:
	 * flash, exec, flash_status (read), flash_status (write).
	 */
1460 uint32_t total_cnt = (4 * cwperpage) + 7;
1461 struct {
1462 struct sps_transfer xfer;
1463 struct sps_iovec cmd_iovec[total_cnt];
1464 struct msm_nand_sps_cmd cmd[total_cnt];
1465 struct {
1466 uint32_t flash_status[cwperpage];
1467 } data;
1468 } *dma_buffer;
1469
1470 memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
1471 err = msm_nand_validate_mtd_params(mtd, false, to, ops, &rw_params);
1472 if (err)
1473 goto validate_mtd_params_failed;
1474
1475 wait_event(chip->dma_wait_queue, (dma_buffer =
1476 msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
1477
1478 memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
1479 msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
1480
1481 while (rw_params.page_count-- > 0) {
1482 data.addr0 = (rw_params.page << 16);
1483 data.addr1 = (rw_params.page >> 16) & 0xff;
1484 cmd = dma_buffer->cmd;
1485
1486 for (n = 0; n < cwperpage ; n++) {
1487 dma_buffer->data.flash_status[n] = 0xeeeeeeee;
1488
1489 curr_cmd = cmd;
1490 msm_nand_prep_rw_cmd_desc(ops, &rw_params,
1491 &data, info, n, &curr_cmd);
1492
1493 cmd = curr_cmd;
1494 msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
1495 READ, msm_virt_to_dma(chip,
1496 &dma_buffer->data.flash_status[n]), 0);
1497 cmd++;
1498
1499 msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
1500 WRITE, data.clrfstatus, 0);
1501 cmd++;
1502
1503 if (n == (cwperpage - 1)) {
1504 msm_nand_prep_ce(cmd,
1505 MSM_NAND_READ_STATUS(info), WRITE,
1506 data.clrrstatus, SPS_IOVEC_FLAG_UNLOCK
1507 | SPS_IOVEC_FLAG_INT);
1508 cmd++;
1509 }
1510 }
1511
1512 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
1513 dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
1514 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
1515 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
1516 &dma_buffer->cmd_iovec);
1517 iovec = dma_buffer->xfer.iovec;
1518
1519 for (n = 0; n < dma_buffer->xfer.iovec_count; n++) {
1520 iovec->addr = msm_virt_to_dma(chip,
1521 &dma_buffer->cmd[n].ce);
1522 iovec->size = sizeof(struct sps_command_element);
1523 iovec->flags = dma_buffer->cmd[n].flags;
1524 iovec++;
1525 }
1526 mutex_lock(&info->bam_lock);
1527 /* Submit data descriptors */
1528 for (n = 0; n < cwperpage; n++) {
1529 err = msm_nand_submit_rw_data_desc(ops,
1530 &rw_params, info, n);
1531 if (err) {
1532 pr_err("Failed to submit data descs %d\n", err);
1533 mutex_unlock(&info->bam_lock);
1534 goto free_dma;
1535 }
1536 }
1537 /* Submit command descriptors */
1538 err = sps_transfer(info->sps.cmd_pipe.handle,
1539 &dma_buffer->xfer);
1540 if (err) {
1541 pr_err("Failed to submit commands %d\n", err);
1542 mutex_unlock(&info->bam_lock);
1543 goto free_dma;
1544 }
1545 wait_for_completion_io(&info->sps.cmd_pipe.completion);
1546 wait_for_completion_io(&info->sps.data_cons.completion);
1547 mutex_unlock(&info->bam_lock);
1548
1549 for (n = 0; n < cwperpage; n++)
1550 pr_debug("write pg %d: flash_status[%d] = %x\n",
1551 rw_params.page, n,
1552 dma_buffer->data.flash_status[n]);
1553
1554 /* Check for flash status errors */
1555 for (n = 0; n < cwperpage; n++) {
1556 flash_sts = dma_buffer->data.flash_status[n];
1557 if (flash_sts & (FS_OP_ERR | FS_MPU_ERR)) {
1558 pr_err("MPU/OP err (0x%x) set\n", flash_sts);
1559 err = -EIO;
1560 goto free_dma;
1561 }
1562 if (n == (cwperpage - 1)) {
1563 if (!(flash_sts & FS_DEVICE_WP) ||
1564 (flash_sts & FS_DEVICE_STS_ERR)) {
1565 pr_err("Dev sts err 0x%x\n", flash_sts);
1566 err = -EIO;
1567 goto free_dma;
1568 }
1569 }
1570 }
1571 pages_written++;
1572 rw_params.page++;
1573 }
1574free_dma:
1575 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
1576 if (ops->oobbuf)
1577 dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
1578 ops->ooblen, DMA_TO_DEVICE);
1579 if (ops->datbuf)
1580 dma_unmap_page(chip->dev, rw_params.data_dma_addr,
1581 ops->len, DMA_TO_DEVICE);
1582validate_mtd_params_failed:
1583 if (ops->mode != MTD_OPS_RAW)
1584 ops->retlen = mtd->writesize * pages_written;
1585 else
1586 ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
1587
1588 ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
1589 if (err)
1590 pr_err("to %llx datalen %x ooblen %x failed with err %d\n",
1591 to, ops->len, ops->ooblen, err);
1592 pr_debug("ret %d, retlen %d oobretlen %d\n",
1593 err, ops->retlen, ops->oobretlen);
1594
1595 pr_debug("================================================\n");
1596 return err;
1597}
1598
1599/*
1600 * Function that gets called from upper layers such as MTD/YAFFS2 to write a
1601 * page with only main data.
1602 */
1603static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
1604 size_t *retlen, const u_char *buf)
1605{
1606 int ret;
1607 struct mtd_oob_ops ops;
1608
1609 ops.mode = MTD_OPS_PLACE_OOB;
1610 ops.len = len;
1611 ops.retlen = 0;
1612 ops.ooblen = 0;
1613 ops.datbuf = (uint8_t *)buf;
1614 ops.oobbuf = NULL;
1615 ret = msm_nand_write_oob(mtd, to, &ops);
1616 *retlen = ops.retlen;
1617 return ret;
1618}
1619
1620/*
1621 * Structure that contains NANDc register data for commands required
1622 * for Erase operation.
1623 */
1624struct msm_nand_erase_reg_data {
1625 struct msm_nand_common_cfgs cfg;
1626 uint32_t exec;
1627 uint32_t flash_status;
1628 uint32_t clrfstatus;
1629 uint32_t clrrstatus;
1630};
1631
1632/*
1633 * Function that gets called from upper layers such as MTD/YAFFS2 to erase a
1634 * block within NAND device.
1635 */
1636static int msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
1637{
1638 int i, err = 0;
1639 struct msm_nand_info *info = mtd->priv;
1640 struct msm_nand_chip *chip = &info->nand_chip;
1641 uint32_t page = 0;
1642 struct msm_nand_sps_cmd *cmd, *curr_cmd;
1643 struct msm_nand_erase_reg_data data;
1644 struct sps_iovec *iovec;
1645 uint32_t total_cnt = 9;
	/*
	 * The following 9 commands are required to erase a block -
	 * flash, addr0, addr1, cfg0, cfg1, exec, flash_status (read),
	 * flash_status (write), read_status.
	 */
1651 struct {
1652 struct sps_transfer xfer;
1653 struct sps_iovec cmd_iovec[total_cnt];
1654 struct msm_nand_sps_cmd cmd[total_cnt];
1655 uint32_t flash_status;
1656 } *dma_buffer;
1657
1658 if (mtd->writesize == PAGE_SIZE_2K)
1659 page = instr->addr >> 11;
1660
1661 if (mtd->writesize == PAGE_SIZE_4K)
1662 page = instr->addr >> 12;
1663
1664 if (instr->addr & (mtd->erasesize - 1)) {
1665 pr_err("unsupported erase address, 0x%llx\n", instr->addr);
1666 err = -EINVAL;
1667 goto out;
1668 }
1669 if (instr->len != mtd->erasesize) {
1670 pr_err("unsupported erase len, %lld\n", instr->len);
1671 err = -EINVAL;
1672 goto out;
1673 }
1674
1675 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
1676 chip, sizeof(*dma_buffer))));
1677 cmd = dma_buffer->cmd;
1678
1679 memset(&data, 0, sizeof(struct msm_nand_erase_reg_data));
1680 data.cfg.cmd = MSM_NAND_CMD_BLOCK_ERASE;
1681 data.cfg.addr0 = page;
1682 data.cfg.addr1 = 0;
1683 data.cfg.cfg0 = chip->cfg0 & (~(7 << CW_PER_PAGE));
1684 data.cfg.cfg1 = chip->cfg1;
1685 data.exec = 1;
1686 dma_buffer->flash_status = 0xeeeeeeee;
1687 data.clrfstatus = MSM_NAND_RESET_FLASH_STS;
1688 data.clrrstatus = MSM_NAND_RESET_READ_STS;
1689
1690 curr_cmd = cmd;
1691 msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
1692
1693 cmd = curr_cmd;
1694 msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data.exec,
1695 SPS_IOVEC_FLAG_NWD);
1696 cmd++;
1697
1698 msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
1699 msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
1700 cmd++;
1701
1702 msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), WRITE,
1703 data.clrfstatus, 0);
1704 cmd++;
1705
1706 msm_nand_prep_ce(cmd, MSM_NAND_READ_STATUS(info), WRITE,
1707 data.clrrstatus,
1708 SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
1709 cmd++;
1710
1711 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
1712 dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
1713 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
1714 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
1715 &dma_buffer->cmd_iovec);
1716 iovec = dma_buffer->xfer.iovec;
1717
1718 for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
1719 iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
1720 iovec->size = sizeof(struct sps_command_element);
1721 iovec->flags = dma_buffer->cmd[i].flags;
1722 iovec++;
1723 }
1724 mutex_lock(&info->bam_lock);
1725 err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
1726 if (err) {
1727 pr_err("Failed to submit commands %d\n", err);
1728 mutex_unlock(&info->bam_lock);
1729 goto free_dma;
1730 }
1731 wait_for_completion_io(&info->sps.cmd_pipe.completion);
1732 mutex_unlock(&info->bam_lock);
1733
1734 /* Check for flash status errors */
1735 if (dma_buffer->flash_status & (FS_OP_ERR |
1736 FS_MPU_ERR | FS_DEVICE_STS_ERR)) {
1737 pr_err("MPU/OP/DEV err (0x%x) set\n", dma_buffer->flash_status);
1738 err = -EIO;
1739 }
1740 if (!(dma_buffer->flash_status & FS_DEVICE_WP)) {
1741 pr_err("Device is write protected\n");
1742 err = -EIO;
1743 }
1744 if (err) {
1745 pr_err("Erase failed, 0x%llx\n", instr->addr);
1746 instr->fail_addr = instr->addr;
1747 instr->state = MTD_ERASE_FAILED;
1748 } else {
1749 instr->state = MTD_ERASE_DONE;
1750 instr->fail_addr = 0xffffffff;
1751 mtd_erase_callback(instr);
1752 }
1753free_dma:
1754 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
1755out:
1756 return err;
1757}
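
/*
 * Illustrative sketch (not part of the driver, hence compiled out): how an
 * upper layer could drive the erase path above through the standard MTD
 * interface. The helper name and the block choice are hypothetical;
 * mtd_erase() and struct erase_info are standard MTD kernel APIs of this
 * kernel generation.
 */
#if 0
static int example_erase_block(struct mtd_info *mtd, int block)
{
	struct erase_info ei = {0};

	/* Erase exactly one block, aligned to the erase size. */
	ei.mtd = mtd;
	ei.addr = (uint64_t)block * mtd->erasesize;
	ei.len = mtd->erasesize;
	return mtd_erase(mtd, &ei);
}
#endif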

/*
 * Structure that contains NANDc register data for the commands required
 * to check if a block is bad.
 */
struct msm_nand_blk_isbad_data {
	struct msm_nand_common_cfgs cfg;
	uint32_t ecc_bch_cfg;
	uint32_t exec;
	uint32_t read_offset;
};

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to check if
 * a block is bad. This is done by reading the first page within a block and
 * checking whether the bad block byte location contains 0xFF or not. If it
 * doesn't contain 0xFF, the block is considered bad.
 */
static int msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	int i, ret = 0, bad_block = 0;
	uint8_t *buf;
	uint32_t page = 0, rdata, cwperpage;
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct msm_nand_blk_isbad_data data;
	struct sps_iovec *iovec;
	uint32_t total_cnt = 9;
	/*
	 * The following 9 commands are required to check if a block is bad -
	 * flash, addr0, addr1, cfg0, cfg1, ecc_cfg, read_loc_0,
	 * exec, flash_status(read).
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[total_cnt];
		struct msm_nand_sps_cmd cmd[total_cnt];
		uint32_t flash_status;
	} *dma_buffer;

	if (mtd->writesize == PAGE_SIZE_2K)
		page = ofs >> 11;

	if (mtd->writesize == PAGE_SIZE_4K)
		page = ofs >> 12;

	cwperpage = (mtd->writesize >> 9);

	if (ofs > mtd->size) {
		pr_err("Invalid offset 0x%llx\n", ofs);
		bad_block = -EINVAL;
		goto out;
	}
	if (ofs & (mtd->erasesize - 1)) {
		pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
		bad_block = -EINVAL;
		goto out;
	}

	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
				chip, sizeof(*dma_buffer) + 4)));
	buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);

	cmd = dma_buffer->cmd;
	memset(&data, 0, sizeof(struct msm_nand_blk_isbad_data));
	data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
	data.cfg.cfg0 = chip->cfg0_raw & ~(7U << CW_PER_PAGE);
	data.cfg.cfg1 = chip->cfg1_raw;

	if (chip->cfg1 & (1 << WIDE_FLASH))
		data.cfg.addr0 = (page << 16) |
				((chip->cw_size * (cwperpage-1)) >> 1);
	else
		data.cfg.addr0 = (page << 16) |
				(chip->cw_size * (cwperpage-1));

	data.cfg.addr1 = (page >> 16) & 0xff;
	data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	data.exec = 1;
	data.read_offset = (mtd->writesize - (chip->cw_size * (cwperpage-1)));
	dma_buffer->flash_status = 0xeeeeeeee;

	curr_cmd = cmd;
	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);

	cmd = curr_cmd;
	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
			data.ecc_bch_cfg, 0);
	cmd++;

	rdata = (data.read_offset << 0) | (4 << 16) | (1 << 31);
	msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE, rdata, 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
			data.exec, SPS_IOVEC_FLAG_NWD);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
		msm_virt_to_dma(chip, &dma_buffer->flash_status),
		SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_UNLOCK);
	cmd++;

	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
					&dma_buffer->cmd_iovec);
	iovec = dma_buffer->xfer.iovec;

	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
		iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
		iovec->size = sizeof(struct sps_command_element);
		iovec->flags = dma_buffer->cmd[i].flags;
		iovec++;
	}
	mutex_lock(&info->bam_lock);
	/* Submit data descriptor */
	ret = sps_transfer_one(info->sps.data_prod.handle,
			msm_virt_to_dma(chip, buf),
			4, NULL, SPS_IOVEC_FLAG_INT);

	if (ret) {
		pr_err("Failed to submit data desc %d\n", ret);
		mutex_unlock(&info->bam_lock);
		goto free_dma;
	}
	/* Submit command descriptor */
	ret = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
	if (ret) {
		pr_err("Failed to submit commands %d\n", ret);
		mutex_unlock(&info->bam_lock);
		goto free_dma;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
	wait_for_completion_io(&info->sps.data_prod.completion);
	mutex_unlock(&info->bam_lock);

	/* Check for flash status errors */
	if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		pr_err("MPU/OP err set: %x\n", dma_buffer->flash_status);
		bad_block = -EIO;
		goto free_dma;
	}

	/* Check for bad block marker byte */
	if (chip->cfg1 & (1 << WIDE_FLASH)) {
		if (buf[0] != 0xFF || buf[1] != 0xFF)
			bad_block = 1;
	} else {
		if (buf[0] != 0xFF)
			bad_block = 1;
	}
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
out:
	return ret ? ret : bad_block;
}
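
/*
 * Illustrative sketch (not part of the driver, hence compiled out): scanning
 * every block through the standard MTD entry point that lands in
 * msm_nand_block_isbad() above. The helper name is hypothetical;
 * mtd_block_isbad() is the standard MTD kernel API.
 */
#if 0
static void example_scan_bad_blocks(struct mtd_info *mtd)
{
	loff_t ofs;

	/* Walk block-aligned offsets; any non-zero return means bad/error. */
	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize)
		if (mtd_block_isbad(mtd, ofs))
			pr_info("bad block at 0x%llx\n", ofs);
}
#endif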

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to mark a
 * block as bad. This is done by writing the first page within the block with
 * 0s, which also sets the bad block byte location to 0.
 */
static int msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_oob_ops ops;
	int ret;
	uint8_t *buf;
	size_t len;

	if (ofs > mtd->size) {
		pr_err("Invalid offset 0x%llx\n", ofs);
		ret = -EINVAL;
		goto out;
	}
	if (ofs & (mtd->erasesize - 1)) {
		pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
		ret = -EINVAL;
		goto out;
	}
	len = mtd->writesize + mtd->oobsize;
	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		pr_err("unable to allocate memory for 0x%zx size\n", len);
		ret = -ENOMEM;
		goto out;
	}
	ops.mode = MTD_OPS_RAW;
	ops.len = len;
	ops.retlen = 0;
	ops.ooblen = 0;
	ops.datbuf = buf;
	ops.oobbuf = NULL;
	ret = msm_nand_write_oob(mtd, ofs, &ops);
	kfree(buf);
out:
	return ret;
}
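
/*
 * Illustrative sketch (not part of the driver, hence compiled out): retiring
 * a block, e.g. after a failed erase, via the standard MTD entry point that
 * lands in msm_nand_block_markbad() above. The helper name is hypothetical;
 * mtd_block_markbad() is the standard MTD kernel API.
 */
#if 0
static void example_retire_block(struct mtd_info *mtd, loff_t ofs)
{
	/* Round down to the start of the block before marking it. */
	ofs &= ~((loff_t)mtd->erasesize - 1);
	if (mtd_block_markbad(mtd, ofs))
		pr_err("failed to mark block at 0x%llx bad\n", ofs);
}
#endif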

/*
 * Function that scans for the attached NAND device. This fills out all
 * the uninitialized function pointers with the defaults. The flash ID is
 * read and the mtd/chip structures are filled with the appropriate values.
 */
int msm_nand_scan(struct mtd_info *mtd)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	struct flash_identification *supported_flash = &info->flash_dev;
	int flash_id = 0, err = 0;
	uint32_t i, mtd_writesize;
	uint8_t dev_found = 0, wide_bus;
	uint32_t manid, devid, devcfg;
	uint32_t bad_block_byte;
	struct nand_flash_dev *flashdev = NULL;
	struct nand_manufacturers *flashman = NULL;

	/* Probe the Flash device for ONFI compliance */
	if (!msm_nand_flash_onfi_probe(info)) {
		dev_found = 1;
	} else {
		err = msm_nand_flash_read_id(info, 0, &flash_id);
		if (err < 0) {
			pr_err("Failed to read Flash ID\n");
			err = -EINVAL;
			goto out;
		}
		manid = flash_id & 0xFF;
		devid = (flash_id >> 8) & 0xFF;
		devcfg = (flash_id >> 24) & 0xFF;

		for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
			if (nand_manuf_ids[i].id == manid)
				flashman = &nand_manuf_ids[i];
		for (i = 0; !flashdev && nand_flash_ids[i].id; ++i)
			if (nand_flash_ids[i].id == devid)
				flashdev = &nand_flash_ids[i];
		if (!flashdev || !flashman) {
			pr_err("unknown nand flashid=%x manuf=%x devid=%x\n",
				flash_id, manid, devid);
			err = -ENOENT;
			goto out;
		}
		dev_found = 1;
		if (!flashdev->pagesize) {
			supported_flash->widebus = devcfg & (1 << 6) ? 1 : 0;
			supported_flash->pagesize = 1024 << (devcfg & 0x3);
			supported_flash->blksize = (64 * 1024) <<
						((devcfg >> 4) & 0x3);
			supported_flash->oobsize = (8 << ((devcfg >> 2) & 1)) *
				(supported_flash->pagesize >> 9);
		} else {
			supported_flash->widebus = flashdev->options &
					NAND_BUSWIDTH_16 ? 1 : 0;
			supported_flash->pagesize = flashdev->pagesize;
			supported_flash->blksize = flashdev->erasesize;
			supported_flash->oobsize = flashdev->pagesize >> 5;
		}
		supported_flash->flash_id = flash_id;
		supported_flash->density = flashdev->chipsize << 20;
	}

	if (dev_found) {
		wide_bus = supported_flash->widebus;
		mtd->size = supported_flash->density;
		mtd->writesize = supported_flash->pagesize;
		mtd->oobsize = supported_flash->oobsize;
		mtd->erasesize = supported_flash->blksize;
		mtd_writesize = mtd->writesize;

		/* Check whether the NAND device supports 8-bit ECC */
		if (supported_flash->ecc_correctability >= 8)
			chip->bch_caps = MSM_NAND_CAP_8_BIT_BCH;
		else
			chip->bch_caps = MSM_NAND_CAP_4_BIT_BCH;

		pr_info("NAND Id: 0x%x Buswidth: %dBits Density: %lld MByte\n",
			supported_flash->flash_id, (wide_bus) ? 16 : 8,
			(mtd->size >> 20));
		pr_info("pagesize: %d Erasesize: %d oobsize: %d (in Bytes)\n",
			mtd->writesize, mtd->erasesize, mtd->oobsize);
		pr_info("BCH ECC: %d Bit\n",
			(chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH ? 8 : 4));
	}

	chip->cw_size = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? 532 : 528;
	chip->cfg0 = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
		| (516 << UD_SIZE_BYTES)
		| (0 << DISABLE_STATUS_AFTER_WRITE)
		| (5 << NUM_ADDR_CYCLES);

	bad_block_byte = (mtd_writesize - (chip->cw_size * (
					(mtd_writesize >> 9) - 1)) + 1);
	chip->cfg1 = (7 << NAND_RECOVERY_CYCLES)
		| (0 << CS_ACTIVE_BSY)
		| (bad_block_byte << BAD_BLOCK_BYTE_NUM)
		| (0 << BAD_BLOCK_IN_SPARE_AREA)
		| (2 << WR_RD_BSY_GAP)
		| ((wide_bus ? 1 : 0) << WIDE_FLASH)
		| (1 << ENABLE_BCH_ECC);

	chip->cfg0_raw = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
		| (5 << NUM_ADDR_CYCLES)
		| (0 << SPARE_SIZE_BYTES)
		| (chip->cw_size << UD_SIZE_BYTES);

	chip->cfg1_raw = (7 << NAND_RECOVERY_CYCLES)
		| (0 << CS_ACTIVE_BSY)
		| (17 << BAD_BLOCK_BYTE_NUM)
		| (1 << BAD_BLOCK_IN_SPARE_AREA)
		| (2 << WR_RD_BSY_GAP)
		| ((wide_bus ? 1 : 0) << WIDE_FLASH)
		| (1 << DEV0_CFG1_ECC_DISABLE);

	chip->ecc_bch_cfg = (0 << ECC_CFG_ECC_DISABLE)
		| (0 << ECC_SW_RESET)
		| (516 << ECC_NUM_DATA_BYTES)
		| (1 << ECC_FORCE_CLK_OPEN);

	if (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) {
		chip->cfg0 |= (wide_bus ? 0 << SPARE_SIZE_BYTES :
				2 << SPARE_SIZE_BYTES);
		chip->ecc_bch_cfg |= (1 << ECC_MODE)
			| ((wide_bus) ? (14 << ECC_PARITY_SIZE_BYTES) :
					(13 << ECC_PARITY_SIZE_BYTES));
	} else {
		chip->cfg0 |= (wide_bus ? 2 << SPARE_SIZE_BYTES :
				4 << SPARE_SIZE_BYTES);
		chip->ecc_bch_cfg |= (0 << ECC_MODE)
			| ((wide_bus) ? (8 << ECC_PARITY_SIZE_BYTES) :
					(7 << ECC_PARITY_SIZE_BYTES));
	}

	/*
	 * For 4-bit BCH ECC (default ECC), parity bytes = 7 (x8) or 8 (x16 I/O).
	 * For 8-bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
	 */
	chip->ecc_parity_bytes = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ?
				(wide_bus ? 14 : 13) : (wide_bus ? 8 : 7);
	chip->ecc_buf_cfg = 0x203; /* No. of bytes covered by ECC - 516 bytes */

	pr_info("CFG0: 0x%08x, CFG1: 0x%08x\n"
		"       RAWCFG0: 0x%08x, RAWCFG1: 0x%08x\n"
		"       ECCBUFCFG: 0x%08x, ECCBCHCFG: 0x%08x\n"
		"       BAD BLOCK BYTE: 0x%08x\n", chip->cfg0, chip->cfg1,
		chip->cfg0_raw, chip->cfg1_raw, chip->ecc_buf_cfg,
		chip->ecc_bch_cfg, bad_block_byte);

	if (mtd->oobsize == 64) {
		mtd->oobavail = 16;
	} else if ((mtd->oobsize == 128) || (mtd->oobsize == 224)) {
		mtd->oobavail = 32;
	} else {
		pr_err("Unsupported NAND oobsize: 0x%x\n", mtd->oobsize);
		err = -ENODEV;
		goto out;
	}

	/* Fill in remaining MTD driver data */
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->_erase = msm_nand_erase;
	mtd->_block_isbad = msm_nand_block_isbad;
	mtd->_block_markbad = msm_nand_block_markbad;
	mtd->_read = msm_nand_read;
	mtd->_write = msm_nand_write;
	mtd->_read_oob = msm_nand_read_oob;
	mtd->_write_oob = msm_nand_write_oob;
	mtd->owner = THIS_MODULE;
out:
	return err;
}
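
/*
 * Illustrative sketch (not part of the driver, hence compiled out): decoding
 * a hypothetical 4th ID byte (devcfg) with the same bit fields used in
 * msm_nand_scan() above, plus the bad block byte computation for a
 * 4-bit-BCH device. The function name and the sample value 0x15 are
 * assumptions for illustration only.
 */
#if 0
static void example_decode_devcfg(void)
{
	uint32_t devcfg = 0x15;	/* sample 4th ID byte */
	uint32_t pagesize = 1024 << (devcfg & 0x3);		 /* 2048 */
	uint32_t blksize = (64 * 1024) << ((devcfg >> 4) & 0x3); /* 128K */
	uint32_t oobsize = (8 << ((devcfg >> 2) & 1)) *
				(pagesize >> 9);		 /* 16 * 4 = 64 */
	/* 4-bit BCH: cw_size = 528; a 2K page holds 4 codewords. */
	uint32_t bad_block_byte = pagesize - 528 * ((pagesize >> 9) - 1) + 1;

	pr_info("page %u, block %u, oob %u, bb byte %u\n",
		pagesize, blksize, oobsize, bad_block_byte); /* bb byte = 465 */
}
#endif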

#define BAM_APPS_PIPE_LOCK_GRP 0
/*
 * This function allocates, configures, and connects an end point, and
 * registers event notification for it. It also allocates DMA memory for
 * the descriptor FIFO of the pipe.
 */
static int msm_nand_init_endpoint(struct msm_nand_info *info,
				struct msm_nand_sps_endpt *end_point,
				uint32_t pipe_index)
{
	int rc = 0;
	struct sps_pipe *pipe_handle;
	struct sps_connect *sps_config = &end_point->config;
	struct sps_register_event *sps_event = &end_point->event;

	pipe_handle = sps_alloc_endpoint();
	if (!pipe_handle) {
		pr_err("sps_alloc_endpoint() failed\n");
		rc = -ENOMEM;
		goto out;
	}

	rc = sps_get_config(pipe_handle, sps_config);
	if (rc) {
		pr_err("sps_get_config() failed %d\n", rc);
		goto free_endpoint;
	}

	if (pipe_index == SPS_DATA_PROD_PIPE_INDEX) {
		/* READ CASE: source - BAM; destination - system memory */
		sps_config->source = info->sps.bam_handle;
		sps_config->destination = SPS_DEV_HANDLE_MEM;
		sps_config->mode = SPS_MODE_SRC;
		sps_config->src_pipe_index = pipe_index;
	} else if (pipe_index == SPS_DATA_CONS_PIPE_INDEX ||
			pipe_index == SPS_CMD_CONS_PIPE_INDEX) {
		/* WRITE CASE: source - system memory; destination - BAM */
		sps_config->source = SPS_DEV_HANDLE_MEM;
		sps_config->destination = info->sps.bam_handle;
		sps_config->mode = SPS_MODE_DEST;
		sps_config->dest_pipe_index = pipe_index;
	}

	sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
	sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP;
	/*
	 * The descriptor FIFO is cyclic. If SPS_MAX_DESC_NUM descriptors are
	 * allowed to be submitted before we get any ack for any of them, the
	 * descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) *
	 * sizeof(struct sps_iovec) - the one extra slot keeps a completely
	 * full cyclic FIFO distinguishable from an empty one.
	 */
	sps_config->desc.size = (SPS_MAX_DESC_NUM + 1) *
					sizeof(struct sps_iovec);
	sps_config->desc.base = dmam_alloc_coherent(info->nand_chip.dev,
					sps_config->desc.size,
					&sps_config->desc.phys_base,
					GFP_KERNEL);
	if (!sps_config->desc.base) {
		pr_err("dmam_alloc_coherent() failed for size %x\n",
				sps_config->desc.size);
		rc = -ENOMEM;
		goto free_endpoint;
	}
	memset(sps_config->desc.base, 0x00, sps_config->desc.size);

	rc = sps_connect(pipe_handle, sps_config);
	if (rc) {
		pr_err("sps_connect() failed %d\n", rc);
		goto free_endpoint;
	}

	init_completion(&end_point->completion);
	sps_event->mode = SPS_TRIGGER_WAIT;
	sps_event->options = SPS_O_DESC_DONE;
	sps_event->xfer_done = &end_point->completion;
	sps_event->user = (void *)info;

	rc = sps_register_event(pipe_handle, sps_event);
	if (rc) {
		pr_err("sps_register_event() failed %d\n", rc);
		goto sps_disconnect;
	}
	end_point->handle = pipe_handle;
	pr_debug("pipe handle 0x%x for pipe %d\n", (uint32_t)pipe_handle,
			pipe_index);
	goto out;
sps_disconnect:
	sps_disconnect(pipe_handle);
free_endpoint:
	sps_free_endpoint(pipe_handle);
out:
	return rc;
}
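
/*
 * Illustrative sketch (not part of the driver, hence compiled out): the
 * descriptor FIFO sizing used above, written out as a standalone
 * computation. The function name is hypothetical; SPS_MAX_DESC_NUM and
 * struct sps_iovec come from this driver and <mach/sps.h>.
 */
#if 0
static void example_desc_fifo_size(void)
{
	uint32_t size = (SPS_MAX_DESC_NUM + 1) * sizeof(struct sps_iovec);

	/* With SPS_MAX_DESC_NUM == 64, this allocates 65 iovec slots. */
	pr_info("descriptor FIFO size: %u bytes\n", size);
}
#endif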

/* This function disconnects and frees an end point */
static void msm_nand_deinit_endpoint(struct msm_nand_info *info,
				struct msm_nand_sps_endpt *end_point)
{
	sps_disconnect(end_point->handle);
	sps_free_endpoint(end_point->handle);
}

/*
 * This function registers the BAM device and initializes its end points for
 * the following pipes -
 * system consumer pipe for data (pipe#0),
 * system producer pipe for data (pipe#1),
 * system consumer pipe for commands (pipe#2).
 */
static int msm_nand_bam_init(struct msm_nand_info *nand_info)
{
	struct sps_bam_props bam = {0};
	int rc = 0;

	bam.phys_addr = nand_info->bam_phys;
	bam.virt_addr = nand_info->bam_base;
	bam.irq = nand_info->bam_irq;
	/*
	 * The NAND device is accessible from both the Apps and Modem
	 * processors, and thus NANDc and BAM are shared between the two. But
	 * BAM must be enabled and instantiated only once during boot up, by
	 * Trustzone, before Modem/Apps is brought out of reset.
	 *
	 * This is indicated to the SPS driver on Apps by marking the flag
	 * SPS_BAM_MGR_DEVICE_REMOTE. The following are the global
	 * initializations that will be done by Trustzone - Execution
	 * Environment, Pipes assignment to Apps/Modem, Pipe Super groups and
	 * Descriptor summing threshold.
	 *
	 * The NANDc BAM device supports 2 execution environments - Modem and
	 * Apps - and thus the flag SPS_BAM_MGR_MULTI_EE is set.
	 */
	bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;

	rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle);
	if (rc) {
		pr_err("sps_register_bam_device() failed with %d\n", rc);
		goto out;
	}
	pr_info("BAM device registered: bam_handle 0x%x\n",
			nand_info->sps.bam_handle);

	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod,
					SPS_DATA_PROD_PIPE_INDEX);
	if (rc)
		goto unregister_bam;

	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons,
					SPS_DATA_CONS_PIPE_INDEX);
	if (rc)
		goto deinit_data_prod;

	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.cmd_pipe,
					SPS_CMD_CONS_PIPE_INDEX);
	if (rc)
		goto deinit_data_cons;
	goto out;
deinit_data_cons:
	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
deinit_data_prod:
	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
unregister_bam:
	sps_deregister_bam_device(nand_info->sps.bam_handle);
out:
	return rc;
}

/*
 * This function de-registers the BAM device, and disconnects and frees its
 * end points for all the pipes.
 */
static void msm_nand_bam_free(struct msm_nand_info *nand_info)
{
	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe);
	sps_deregister_bam_device(nand_info->sps.bam_handle);
}

/* This function enables DMA support for the NANDc in BAM mode. */
static int msm_nand_enable_dma(struct msm_nand_info *info)
{
	struct msm_nand_sps_cmd *sps_cmd;
	struct msm_nand_chip *chip = &info->nand_chip;
	int ret;

	wait_event(chip->dma_wait_queue,
		(sps_cmd = msm_nand_get_dma_buffer(chip, sizeof(*sps_cmd))));

	msm_nand_prep_ce(sps_cmd, MSM_NAND_CTRL(info), WRITE,
			(1 << BAM_MODE_EN), SPS_IOVEC_FLAG_INT);

	ret = sps_transfer_one(info->sps.cmd_pipe.handle,
			msm_virt_to_dma(chip, &sps_cmd->ce),
			sizeof(struct sps_command_element), NULL,
			sps_cmd->flags);
	if (ret) {
		pr_err("Failed to submit command: %d\n", ret);
		goto out;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
out:
	msm_nand_release_dma_buffer(chip, sps_cmd, sizeof(*sps_cmd));
	return ret;
}

/*
 * This function gets called when a device named msm-nand is added to the
 * device tree .dts file with all its resources, such as the physical
 * addresses for NANDc and BAM, and the BAM IRQ.
 *
 * It also expects the NAND flash partition information to be passed in the
 * .dts file so that it can parse the partitions by calling the MTD function
 * mtd_device_parse_register().
 */
static int __devinit msm_nand_probe(struct platform_device *pdev)
{
	struct msm_nand_info *info;
	struct resource *res;
	int err;
	struct device_node *pnode;
	struct mtd_part_parser_data parser_data;

	if (!pdev->dev.of_node) {
		pr_err("No valid device tree info for NANDc\n");
		err = -ENODEV;
		goto out;
	}

	/*
	 * The partition information can also be passed from the kernel
	 * command line. Also, the MTD core layer supports adding the whole
	 * device as one MTD device when no partition information is available
	 * at all. Hence, do not bail out when partition information is not
	 * available in the device tree.
	 */
	pnode = of_find_node_by_path("/qcom,mtd-partitions");
	if (!pnode)
		pr_info("No partition info available in device tree\n");
	info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info),
				GFP_KERNEL);
	if (!info) {
		pr_err("Unable to allocate memory for msm_nand_info\n");
		err = -ENOMEM;
		goto out;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					"nand_phys");
	if (!res || !res->start) {
		pr_err("NAND phys address range is not provided\n");
		err = -ENODEV;
		goto out;
	}
	info->nand_phys = res->start;
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					"bam_phys");
	if (!res || !res->start) {
		pr_err("BAM phys address range is not provided\n");
		err = -ENODEV;
		goto out;
	}
	info->bam_phys = res->start;
	info->bam_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!info->bam_base) {
		pr_err("BAM ioremap() failed for addr 0x%x size 0x%x\n",
			res->start, resource_size(res));
		err = -ENOMEM;
		goto out;
	}

	info->bam_irq = platform_get_irq_byname(pdev, "bam_irq");
	if (info->bam_irq < 0) {
		pr_err("BAM IRQ is not provided\n");
		err = -ENODEV;
		goto out;
	}

	info->mtd.name = dev_name(&pdev->dev);
	info->mtd.priv = info;
	info->mtd.owner = THIS_MODULE;
	info->nand_chip.dev = &pdev->dev;
	init_waitqueue_head(&info->nand_chip.dma_wait_queue);
	mutex_init(&info->bam_lock);

	info->nand_chip.dma_virt_addr =
		dmam_alloc_coherent(&pdev->dev, MSM_NAND_DMA_BUFFER_SIZE,
			&info->nand_chip.dma_phys_addr, GFP_KERNEL);
	if (!info->nand_chip.dma_virt_addr) {
		pr_err("No memory for DMA buffer size %x\n",
				MSM_NAND_DMA_BUFFER_SIZE);
		err = -ENOMEM;
		goto out;
	}
	err = msm_nand_bam_init(info);
	if (err) {
		pr_err("msm_nand_bam_init() failed %d\n", err);
		goto out;
	}
	err = msm_nand_enable_dma(info);
	if (err) {
		pr_err("Failed to enable DMA in NANDc\n");
		goto free_bam;
	}
	if (msm_nand_scan(&info->mtd)) {
		pr_err("No nand device found\n");
		err = -ENXIO;
		goto free_bam;
	}
	parser_data.of_node = pnode;
	err = mtd_device_parse_register(&info->mtd, NULL, &parser_data,
					NULL, 0);
	if (err < 0) {
		pr_err("Unable to register MTD partitions %d\n", err);
		goto free_bam;
	}
	dev_set_drvdata(&pdev->dev, info);

	pr_info("NANDc phys addr 0x%lx, BAM phys addr 0x%lx, BAM IRQ %d\n",
			info->nand_phys, info->bam_phys, info->bam_irq);
	pr_info("Allocated DMA buffer at virt_addr 0x%p, phys_addr 0x%x\n",
		info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
	goto out;
free_bam:
	msm_nand_bam_free(info);
out:
	return err;
}
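
/*
 * Illustrative sketch (not part of the driver): a hypothetical device tree
 * node matching the resources the probe above looks up. The addresses, IRQ
 * specifier, and node name are assumptions for illustration; only the
 * compatible string, the "nand_phys"/"bam_phys" resource names, and the
 * "bam_irq" interrupt name come from the code.
 *
 *	qcom,nand@f9af0000 {
 *		compatible = "qcom,msm-nand";
 *		reg = <0xf9af0000 0x1000>,
 *		      <0xf9ac4000 0x8000>;
 *		reg-names = "nand_phys", "bam_phys";
 *		interrupts = <0 279 0>;
 *		interrupt-names = "bam_irq";
 *	};
 */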

/*
 * Remove functionality that gets called when the driver/device msm-nand
 * is removed.
 */
static int __devexit msm_nand_remove(struct platform_device *pdev)
{
	struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	if (info) {
		mtd_device_unregister(&info->mtd);
		msm_nand_bam_free(info);
	}
	return 0;
}

#define DRIVER_NAME "msm_qpic_nand"
static const struct of_device_id msm_nand_match_table[] = {
	{ .compatible = "qcom,msm-nand", },
	{},
};
static struct platform_driver msm_nand_driver = {
	.probe = msm_nand_probe,
	.remove = __devexit_p(msm_nand_remove),
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = msm_nand_match_table,
	},
};

module_platform_driver(msm_nand_driver);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM QPIC NAND flash driver");