/*
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/crc16.h>
#include <linux/bitrev.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/ctype.h>
#include <mach/sps.h>
#include <mach/msm_smsm.h>
#define PAGE_SIZE_2K 2048
#define PAGE_SIZE_4K 4096
#define WRITE 1
#define READ 0
/*
 * The maximum number of descriptors per transfer (page read/write) won't
 * be more than 64. For details on the commands these descriptors carry,
 * refer to the page read and page write functions in the driver.
 */
#define SPS_MAX_DESC_NUM 64
#define SPS_DATA_CONS_PIPE_INDEX 0
#define SPS_DATA_PROD_PIPE_INDEX 1
#define SPS_CMD_CONS_PIPE_INDEX 2

#define msm_virt_to_dma(chip, vaddr) \
	((chip)->dma_phys_addr + \
	 ((uint8_t *)(vaddr) - (chip)->dma_virt_addr))

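/*
 * Example (hypothetical addresses): if dma_virt_addr is 0xc0100000 and
 * dma_phys_addr is 0x80100000, a command element prepared at virtual
 * address 0xc0100040 maps to bus address 0x80100040 for the BAM. This
 * only holds for pointers inside the pool allocated at probe time.
 */
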
/*
 * A single page read/write request typically needs about 1K of DMA
 * memory, so for a single request this pool is more than enough.
 *
 * But to accommodate multiple clients we allocate 8K of memory. Though
 * only one client request can be submitted to the NANDc at any time,
 * other clients can still prepare their descriptors while waiting for
 * the current client request to be done. Thus, with a total of 8K, the
 * driver can currently support up to 7 or 8 clients at a time. A client
 * for which there is no free DMA memory waits on the wait queue until
 * other clients free up the required memory.
 */
#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
/*
 * This defines the granularity at which the buffer management is done.
 * The total number of slots is based on the size (in bits) of the
 * atomic_t member dma_buffer_busy within struct msm_nand_chip.
 */
#define MSM_NAND_DMA_BUFFER_SLOT_SZ \
	(MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))

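/*
 * Worked example of the slot math above: with MSM_NAND_DMA_BUFFER_SIZE
 * of SZ_8K (8192 bytes) and a 32-bit atomic counter (4 bytes * 8 bits),
 * the slot size is 8192 / 32 = 256 bytes, giving 32 slots tracked by
 * one bit each. A typical ~1K request thus consumes
 * DIV_ROUND_UP(1024, 256) = 4 slots, which is why roughly 7 or 8
 * clients fit into the 8K pool at a time.
 */
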
/* ONFI (Open NAND Flash Interface) parameters */
#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
#define ONFI_PARAM_INFO_LENGTH 0x0200
#define ONFI_PARAM_PAGE_LENGTH 0x0100
#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
#define FLASH_READ_ONFI_SIGNATURE_ADDRESS 0x20
#define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC
#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
#define FLASH_READ_DEVICE_ID_ADDRESS 0x00

#define MSM_NAND_RESET_FLASH_STS 0x00000020
#define MSM_NAND_RESET_READ_STS 0x000000C0

/* QPIC NANDc (NAND Controller) Register Set */
#define MSM_NAND_REG(info, off) (info->nand_phys + off)
#define MSM_NAND_FLASH_CMD(info) MSM_NAND_REG(info, 0x30000)
#define MSM_NAND_ADDR0(info) MSM_NAND_REG(info, 0x30004)
#define MSM_NAND_ADDR1(info) MSM_NAND_REG(info, 0x30008)
#define MSM_NAND_EXEC_CMD(info) MSM_NAND_REG(info, 0x30010)
#define MSM_NAND_FLASH_STATUS(info) MSM_NAND_REG(info, 0x30014)
#define FS_OP_ERR (1 << 4)
#define FS_MPU_ERR (1 << 8)
#define FS_DEVICE_STS_ERR (1 << 16)
#define FS_DEVICE_WP (1 << 23)

#define MSM_NAND_BUFFER_STATUS(info) MSM_NAND_REG(info, 0x30018)
#define BS_UNCORRECTABLE_BIT (1 << 8)
#define BS_CORRECTABLE_ERR_MSK 0x1F

#define MSM_NAND_DEV0_CFG0(info) MSM_NAND_REG(info, 0x30020)
#define DISABLE_STATUS_AFTER_WRITE 4
#define CW_PER_PAGE 6
#define UD_SIZE_BYTES 9
#define SPARE_SIZE_BYTES 23
#define NUM_ADDR_CYCLES 27

#define MSM_NAND_DEV0_CFG1(info) MSM_NAND_REG(info, 0x30024)
#define DEV0_CFG1_ECC_DISABLE 0
#define WIDE_FLASH 1
#define NAND_RECOVERY_CYCLES 2
#define CS_ACTIVE_BSY 5
#define BAD_BLOCK_BYTE_NUM 6
#define BAD_BLOCK_IN_SPARE_AREA 16
#define WR_RD_BSY_GAP 17
#define ENABLE_BCH_ECC 27

#define MSM_NAND_DEV0_ECC_CFG(info) MSM_NAND_REG(info, 0x30028)
#define ECC_CFG_ECC_DISABLE 0
#define ECC_SW_RESET 1
#define ECC_MODE 4
#define ECC_PARITY_SIZE_BYTES 8
#define ECC_NUM_DATA_BYTES 16
#define ECC_FORCE_CLK_OPEN 30

#define MSM_NAND_READ_ID(info) MSM_NAND_REG(info, 0x30040)
#define MSM_NAND_READ_STATUS(info) MSM_NAND_REG(info, 0x30044)
#define MSM_NAND_DEV_CMD1(info) MSM_NAND_REG(info, 0x300A4)
#define MSM_NAND_DEV_CMD_VLD(info) MSM_NAND_REG(info, 0x300AC)
#define MSM_NAND_EBI2_ECC_BUF_CFG(info) MSM_NAND_REG(info, 0x300F0)
#define MSM_NAND_ERASED_CW_DETECT_CFG(info) MSM_NAND_REG(info, 0x300E8)
#define MSM_NAND_ERASED_CW_DETECT_STATUS(info) MSM_NAND_REG(info, 0x300EC)

#define MSM_NAND_CTRL(info) MSM_NAND_REG(info, 0x30F00)
#define BAM_MODE_EN 0

#define MSM_NAND_READ_LOCATION_0(info) MSM_NAND_REG(info, 0x30F20)
#define MSM_NAND_READ_LOCATION_1(info) MSM_NAND_REG(info, 0x30F24)

/* device commands */
#define MSM_NAND_CMD_PAGE_READ 0x32
#define MSM_NAND_CMD_PAGE_READ_ECC 0x33
#define MSM_NAND_CMD_PAGE_READ_ALL 0x34
#define MSM_NAND_CMD_PRG_PAGE 0x36
#define MSM_NAND_CMD_PRG_PAGE_ECC 0x37
#define MSM_NAND_CMD_PRG_PAGE_ALL 0x39
#define MSM_NAND_CMD_BLOCK_ERASE 0x3A
#define MSM_NAND_CMD_FETCH_ID 0x0B

/* Structure that defines a NAND SPS command element */
struct msm_nand_sps_cmd {
	struct sps_command_element ce;
	uint32_t flags;
};

/*
 * Structure that defines the NAND controller properties as per the
 * NAND flash device/chip that is attached.
 */
struct msm_nand_chip {
	struct device *dev;
	/*
	 * DMA memory will be allocated only once during probe and this
	 * memory will be used by all NAND clients. This wait queue is
	 * needed to make applications wait for DMA memory to be freed
	 * when the entire pool is exhausted.
	 */
	wait_queue_head_t dma_wait_queue;
	atomic_t dma_buffer_busy;
	uint8_t *dma_virt_addr;
	dma_addr_t dma_phys_addr;
	uint32_t ecc_parity_bytes;
	uint32_t bch_caps; /* Controller BCH ECC capabilities */
#define MSM_NAND_CAP_4_BIT_BCH (1 << 0)
#define MSM_NAND_CAP_8_BIT_BCH (1 << 1)
	uint32_t cw_size;
	/* NANDc register configurations */
	uint32_t cfg0, cfg1, cfg0_raw, cfg1_raw;
	uint32_t ecc_buf_cfg;
	uint32_t ecc_bch_cfg;
};

/* Structure that defines an SPS end point for a NANDc BAM pipe. */
struct msm_nand_sps_endpt {
	struct sps_pipe *handle;
	struct sps_connect config;
	struct sps_register_event event;
	struct completion completion;
};

/*
 * Structure that defines NANDc SPS data - BAM handle and an end point
 * for each BAM pipe.
 */
struct msm_nand_sps_info {
	uint32_t bam_handle;
	struct msm_nand_sps_endpt data_prod;
	struct msm_nand_sps_endpt data_cons;
	struct msm_nand_sps_endpt cmd_pipe;
};

/*
 * Structure that contains flash device information. This gets updated
 * after the NAND flash device detection.
 */
struct flash_identification {
	uint32_t flash_id;
	uint32_t density;
	uint32_t widebus;
	uint32_t pagesize;
	uint32_t blksize;
	uint32_t oobsize;
	uint32_t ecc_correctability;
};

/* Structure that defines NANDc private data. */
struct msm_nand_info {
	struct mtd_info mtd;
	struct msm_nand_chip nand_chip;
	struct msm_nand_sps_info sps;
	unsigned long bam_phys;
	unsigned long nand_phys;
	void __iomem *bam_base;
	int bam_irq;
	/*
	 * This lock must be acquired before submitting any command or data
	 * descriptors to BAM pipes and must be held until all the submitted
	 * descriptors are processed.
	 *
	 * This is required to ensure that both command and data descriptors
	 * are submitted atomically without interruption from other clients,
	 * when there are requests from more than one client at any time.
	 * Otherwise, data and command descriptors can be submitted out of
	 * order for a request, which can cause data corruption.
	 */
	struct mutex bam_lock;
	struct flash_identification flash_dev;
};

/* Structure that defines an ONFI parameter page (512B) */
struct onfi_param_page {
	uint32_t parameter_page_signature;
	uint16_t revision_number;
	uint16_t features_supported;
	uint16_t optional_commands_supported;
	uint8_t reserved0[22];
	uint8_t device_manufacturer[12];
	uint8_t device_model[20];
	uint8_t jedec_manufacturer_id;
	uint16_t date_code;
	uint8_t reserved1[13];
	uint32_t number_of_data_bytes_per_page;
	uint16_t number_of_spare_bytes_per_page;
	uint32_t number_of_data_bytes_per_partial_page;
	uint16_t number_of_spare_bytes_per_partial_page;
	uint32_t number_of_pages_per_block;
	uint32_t number_of_blocks_per_logical_unit;
	uint8_t number_of_logical_units;
	uint8_t number_of_address_cycles;
	uint8_t number_of_bits_per_cell;
	uint16_t maximum_bad_blocks_per_logical_unit;
	uint16_t block_endurance;
	uint8_t guaranteed_valid_begin_blocks;
	uint16_t guaranteed_valid_begin_blocks_endurance;
	uint8_t number_of_programs_per_page;
	uint8_t partial_program_attributes;
	uint8_t number_of_bits_ecc_correctability;
	uint8_t number_of_interleaved_address_bits;
	uint8_t interleaved_operation_attributes;
	uint8_t reserved2[13];
	uint8_t io_pin_capacitance;
	uint16_t timing_mode_support;
	uint16_t program_cache_timing_mode_support;
	uint16_t maximum_page_programming_time;
	uint16_t maximum_block_erase_time;
	uint16_t maximum_page_read_time;
	uint16_t maximum_change_column_setup_time;
	uint8_t reserved3[23];
	uint16_t vendor_specific_revision_number;
	uint8_t vendor_specific[88];
	uint16_t integrity_crc;
} __attribute__((__packed__));

#define FLASH_PART_MAGIC1 0x55EE73AA
#define FLASH_PART_MAGIC2 0xE35EBDDB
#define FLASH_PTABLE_V3 3
#define FLASH_PTABLE_V4 4
#define FLASH_PTABLE_MAX_PARTS_V3 16
#define FLASH_PTABLE_MAX_PARTS_V4 32
#define FLASH_PTABLE_HDR_LEN (4*sizeof(uint32_t))
#define FLASH_PTABLE_ENTRY_NAME_SIZE 16

struct flash_partition_entry {
	char name[FLASH_PTABLE_ENTRY_NAME_SIZE];
	u32 offset; /* Offset in blocks from beginning of device */
	u32 length; /* Length of the partition in blocks */
	u8 attr; /* Flags for this partition */
};

struct flash_partition_table {
	u32 magic1;
	u32 magic2;
	u32 version;
	u32 numparts;
	struct flash_partition_entry part_entry[FLASH_PTABLE_MAX_PARTS_V4];
};

static struct flash_partition_table ptable;

static struct mtd_partition mtd_part[FLASH_PTABLE_MAX_PARTS_V4];

/*
 * Gets DMA memory of the requested size. Returns a pointer to free memory
 * from the allocated pool, or NULL if there is no free memory.
 */
static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
{
	uint32_t bitmask, free_bitmask, old_bitmask;
	uint32_t need_mask, current_need_mask;
	int free_index;

	need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
		    - 1;
	bitmask = atomic_read(&chip->dma_buffer_busy);
	free_bitmask = ~bitmask;
	do {
		free_index = __ffs(free_bitmask);
		current_need_mask = need_mask << free_index;

		if (size + free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ >=
						MSM_NAND_DMA_BUFFER_SIZE)
			return NULL;

		if ((bitmask & current_need_mask) == 0) {
			old_bitmask =
				atomic_cmpxchg(&chip->dma_buffer_busy,
					bitmask,
					bitmask | current_need_mask);
			if (old_bitmask == bitmask)
				return chip->dma_virt_addr +
				free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ;
			free_bitmask = 0; /* force return */
		}
		/* current free range was too small, clear all free bits */
		/* below the top busy bit within current_need_mask */
		free_bitmask &=
			~(~0U >> (32 - fls(bitmask & current_need_mask)));
	} while (free_bitmask);

	return NULL;
}

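/*
 * Allocation walkthrough (hypothetical state): a 600-byte request needs
 * DIV_ROUND_UP(600, 256) = 3 slots, so need_mask = 0b111. If
 * dma_buffer_busy reads ...0011, __ffs() yields free_index = 2 and
 * current_need_mask = 0b11100. As no busy bits overlap, the cmpxchg
 * tries to mark those slots busy; if another client raced in first,
 * the function returns NULL and the caller's wait_event() retries.
 */
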
/*
 * Releases the used DMA memory back to the free pool and wakes up any
 * thread waiting on the wait queue for free memory to become available.
 */
static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
					void *buffer, size_t size)
{
	int index;
	uint32_t used_mask;

	used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
		    - 1;
	index = ((uint8_t *)buffer - chip->dma_virt_addr) /
		MSM_NAND_DMA_BUFFER_SLOT_SZ;
	atomic_sub(used_mask << index, &chip->dma_buffer_busy);

	wake_up(&chip->dma_wait_queue);
}

/*
 * Calculates the page address of the buffer passed and the offset of the
 * buffer within that page, and then maps it for DMA via dma_map_page().
 */
static dma_addr_t msm_nand_dma_map(struct device *dev, void *addr, size_t size,
				enum dma_data_direction dir)
{
	struct page *page;
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;

	if (virt_addr_valid(addr))
		page = virt_to_page(addr);
	else {
		if (WARN_ON(size + offset > PAGE_SIZE))
			return ~0;
		page = vmalloc_to_page(addr);
	}
	return dma_map_page(dev, page, offset, size, dir);
}

/*
 * Wrapper function to prepare an SPS command element with the data
 * passed to this function.
 *
 * Since every command element must have the SPS_IOVEC_FLAG_CMD flag set,
 * this function sets it by default on the command element that is passed,
 * so the caller need not pass it explicitly. The other flags must be
 * passed as needed. If a command element doesn't need any other flag,
 * 0 can be passed for flags.
 */
static inline void msm_nand_prep_ce(struct msm_nand_sps_cmd *sps_cmd,
				uint32_t addr, uint32_t command,
				uint32_t data, uint32_t flags)
{
	struct sps_command_element *cmd = &sps_cmd->ce;

	cmd->addr = addr;
	cmd->command = (command & WRITE) ? (uint32_t) SPS_WRITE_COMMAND :
			(uint32_t) SPS_READ_COMMAND;
	cmd->data = data;
	cmd->mask = 0xFFFFFFFF;
	sps_cmd->flags = SPS_IOVEC_FLAG_CMD | flags;
}

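/*
 * Typical usage sketch, mirroring the callers below: to post a write of
 * 1 to the EXEC_CMD register as a no-wait descriptor, a caller does
 *
 *	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, 1,
 *			SPS_IOVEC_FLAG_NWD);
 *	cmd++;
 *
 * and SPS_IOVEC_FLAG_CMD is OR'ed in implicitly by the helper.
 */
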
/*
 * Reads a single NANDc register specified by addr. The return value
 * indicates whether the read was successful. The register value read
 * is stored in val.
 */
static int msm_nand_flash_rd_reg(struct msm_nand_info *info, uint32_t addr,
				uint32_t *val)
{
	int ret = 0;
	struct msm_nand_sps_cmd *cmd;
	struct msm_nand_chip *chip = &info->nand_chip;
	struct {
		struct msm_nand_sps_cmd cmd;
		uint32_t data;
	} *dma_buffer;

	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
		    chip, sizeof(*dma_buffer))));
	cmd = &dma_buffer->cmd;
	msm_nand_prep_ce(cmd, addr, READ, msm_virt_to_dma(chip,
			&dma_buffer->data), SPS_IOVEC_FLAG_INT);

	ret = sps_transfer_one(info->sps.cmd_pipe.handle,
			msm_virt_to_dma(chip, &cmd->ce),
			sizeof(struct sps_command_element), NULL, cmd->flags);
	if (ret) {
		pr_err("failed to submit command %x ret %d\n", addr, ret);
		goto out;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
	*val = dma_buffer->data;
out:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	return ret;
}

/*
 * Reads the flash ID from the NAND flash device. A return value < 0
 * indicates failure. On success, the flash ID is stored in read_id.
 */
static int msm_nand_flash_read_id(struct msm_nand_info *info,
		bool read_onfi_signature,
		uint32_t *read_id)
{
	int err = 0, i;
	struct msm_nand_sps_cmd *cmd;
	struct sps_iovec *iovec;
	struct msm_nand_chip *chip = &info->nand_chip;
	uint32_t total_cnt = 4;
	/*
	 * The following 4 commands are required to read the ID:
	 * write commands - addr0, flash, exec
	 * read commands - read_id
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[total_cnt];
		struct msm_nand_sps_cmd cmd[total_cnt];
		uint32_t data[total_cnt];
	} *dma_buffer;

	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
				(chip, sizeof(*dma_buffer))));
	if (read_onfi_signature)
		dma_buffer->data[0] = FLASH_READ_ONFI_SIGNATURE_ADDRESS;
	else
		dma_buffer->data[0] = FLASH_READ_DEVICE_ID_ADDRESS;

	dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
	dma_buffer->data[2] = 1;
	dma_buffer->data[3] = 0xeeeeeeee;

	cmd = dma_buffer->cmd;
	msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
			dma_buffer->data[0], SPS_IOVEC_FLAG_LOCK);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
			dma_buffer->data[1], 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
			dma_buffer->data[2], SPS_IOVEC_FLAG_NWD);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_READ_ID(info), READ,
			msm_virt_to_dma(chip, &dma_buffer->data[3]),
			SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
	cmd++;

	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
					&dma_buffer->cmd_iovec);
	iovec = dma_buffer->xfer.iovec;

	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
		iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
		iovec->size = sizeof(struct sps_command_element);
		iovec->flags = dma_buffer->cmd[i].flags;
		iovec++;
	}

	mutex_lock(&info->bam_lock);
	err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
	if (err) {
		pr_err("Failed to submit commands %d\n", err);
		mutex_unlock(&info->bam_lock);
		goto out;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
	mutex_unlock(&info->bam_lock);

	pr_debug("Read ID register value 0x%x\n", dma_buffer->data[3]);
	if (!read_onfi_signature)
		pr_debug("nandid: %x maker %02x device %02x\n",
			dma_buffer->data[3], dma_buffer->data[3] & 0xff,
			(dma_buffer->data[3] >> 8) & 0xff);
	*read_id = dma_buffer->data[3];
out:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	return err;
}

/*
 * Contains data for common configuration registers that must be
 * programmed for every NANDc operation.
 */
struct msm_nand_common_cfgs {
	uint32_t cmd;
	uint32_t addr0;
	uint32_t addr1;
	uint32_t cfg0;
	uint32_t cfg1;
};

/*
 * Function to prepare SPS command elements to write into NANDc
 * configuration registers as per the data defined in struct
 * msm_nand_common_cfgs. This is required for the following NANDc
 * operations - Erase, Bad Block checking and reading the ONFI
 * parameter page.
 */
static void msm_nand_prep_cfg_cmd_desc(struct msm_nand_info *info,
				struct msm_nand_common_cfgs data,
				struct msm_nand_sps_cmd **curr_cmd)
{
	struct msm_nand_sps_cmd *cmd;

	cmd = *curr_cmd;
	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data.cmd,
			SPS_IOVEC_FLAG_LOCK);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE, data.addr0, 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE, data.addr1, 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE, data.cfg0, 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE, data.cfg1, 0);
	cmd++;
	*curr_cmd = cmd;
}

/*
 * Function to perform the CRC integrity check on the ONFI parameter page
 * read. For the ONFI parameter page read, the controller ECC is disabled.
 * Hence, it is mandatory to compute the CRC manually and check it against
 * the value stored within the ONFI page.
 */
static uint16_t msm_nand_flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
{
	int i;
	uint16_t result;

	for (i = 0; i < count; i++)
		buffer[i] = bitrev8(buffer[i]);

	result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));

	for (i = 0; i < count; i++)
		buffer[i] = bitrev8(buffer[i]);

	return result;
}

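/*
 * Note on the bit reversals above: the kernel's crc16() implements the
 * reflected (LSB-first) form of the 0x8005 polynomial, while ONFI
 * specifies a non-reflected CRC-16 with initial value 0x4F4E. Reversing
 * each input byte, seeding with bitrev16(0x4f4e) and bit-reversing the
 * result converts between the two forms. The caller below uses it as:
 *
 *	if (msm_nand_flash_onfi_crc_check((uint8_t *)onfi_param_page_ptr,
 *			ONFI_PARAM_PAGE_LENGTH - 2) ==
 *			onfi_param_page_ptr->integrity_crc)
 *		... CRC matched ...
 */
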
/*
 * Structure that contains NANDc register data for the commands required
 * to read the ONFI parameter page.
 */
struct msm_nand_flash_onfi_data {
	struct msm_nand_common_cfgs cfg;
	uint32_t exec;
	uint32_t devcmd1_orig;
	uint32_t devcmdvld_orig;
	uint32_t devcmd1_mod;
	uint32_t devcmdvld_mod;
	uint32_t ecc_bch_cfg;
};

/*
 * Function to identify whether the attached NAND flash device is
 * compliant with the ONFI spec. If it is, the function reads the ONFI
 * parameter page to get the device parameters.
 */
static int msm_nand_flash_onfi_probe(struct msm_nand_info *info)
{
	struct msm_nand_chip *chip = &info->nand_chip;
	struct flash_identification *flash = &info->flash_dev;
	uint32_t crc_chk_count = 0, page_address = 0;
	int ret = 0, i;

	/* SPS parameters */
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct sps_iovec *iovec;
	uint32_t rdata;

	/* ONFI Identifier/Parameter Page parameters */
	uint8_t *onfi_param_info_buf = NULL;
	dma_addr_t dma_addr_param_info = 0;
	struct onfi_param_page *onfi_param_page_ptr;
	struct msm_nand_flash_onfi_data data;
	uint32_t onfi_signature;

	/* SPS command/data descriptors */
	uint32_t total_cnt = 13;
	/*
	 * The following 13 commands are required to get the ONFI
	 * parameters - flash, addr0, addr1, cfg0, cfg1, dev0_ecc_cfg,
	 * cmd_vld, dev_cmd1, read_loc_0, exec, flash_status (read cmd),
	 * dev_cmd1, cmd_vld.
	 */
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[total_cnt];
		struct msm_nand_sps_cmd cmd[total_cnt];
		uint32_t flash_status;
	} *dma_buffer;

	wait_event(chip->dma_wait_queue, (onfi_param_info_buf =
		msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
	dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);

	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
				(chip, sizeof(*dma_buffer))));

	ret = msm_nand_flash_read_id(info, 1, &onfi_signature);
	if (ret < 0) {
		pr_err("Failed to read ONFI signature\n");
		goto free_dma;
	}
	if (onfi_signature != ONFI_PARAMETER_PAGE_SIGNATURE) {
		pr_info("Found a non ONFI device\n");
		ret = -EIO;
		goto free_dma;
	}

	memset(&data, 0, sizeof(struct msm_nand_flash_onfi_data));
	ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD1(info),
				&data.devcmd1_orig);
	if (ret < 0)
		goto free_dma;
	ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD_VLD(info),
			&data.devcmdvld_orig);
	if (ret < 0)
		goto free_dma;

	/* Look up the 'APPS' partition's first page address */
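	/*
	 * Note: the block-to-page conversion below (offset << 6) assumes
	 * 64 pages per block; e.g. a partition starting at block 8 would
	 * yield page_address = 8 << 6 = 512.
	 */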
	for (i = 0; i < FLASH_PTABLE_MAX_PARTS_V4; i++) {
		if (!strncmp("apps", ptable.part_entry[i].name,
				strlen(ptable.part_entry[i].name))) {
			page_address = ptable.part_entry[i].offset << 6;
			break;
		}
	}
	data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
	data.exec = 1;
	data.cfg.addr0 = (page_address << 16) |
				FLASH_READ_ONFI_PARAMETERS_ADDRESS;
	data.cfg.addr1 = (page_address >> 16) & 0xFF;
	data.cfg.cfg0 = MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO;
	data.cfg.cfg1 = MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO;
	data.devcmd1_mod = (data.devcmd1_orig & 0xFFFFFF00) |
				FLASH_READ_ONFI_PARAMETERS_COMMAND;
	data.devcmdvld_mod = data.devcmdvld_orig & 0xFFFFFFFE;
	data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
	dma_buffer->flash_status = 0xeeeeeeee;

	curr_cmd = cmd = dma_buffer->cmd;
	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);

	cmd = curr_cmd;
	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
			data.ecc_bch_cfg, 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE,
			data.devcmdvld_mod, 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE,
			data.devcmd1_mod, 0);
	cmd++;

	rdata = (0 << 0) | (ONFI_PARAM_INFO_LENGTH << 16) | (1 << 31);
	msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
			rdata, 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
			data.exec, SPS_IOVEC_FLAG_NWD);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
			msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE,
			data.devcmd1_orig, 0);
	cmd++;

	msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE,
			data.devcmdvld_orig,
			SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
	cmd++;

	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
					&dma_buffer->cmd_iovec);
	iovec = dma_buffer->xfer.iovec;

	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
		iovec->addr = msm_virt_to_dma(chip,
				&dma_buffer->cmd[i].ce);
		iovec->size = sizeof(struct sps_command_element);
		iovec->flags = dma_buffer->cmd[i].flags;
		iovec++;
	}
	mutex_lock(&info->bam_lock);
	/* Submit data descriptor */
	ret = sps_transfer_one(info->sps.data_prod.handle, dma_addr_param_info,
			ONFI_PARAM_INFO_LENGTH, NULL, SPS_IOVEC_FLAG_INT);
	if (ret) {
		pr_err("Failed to submit data descriptors %d\n", ret);
		mutex_unlock(&info->bam_lock);
		goto free_dma;
	}
	/* Submit command descriptors */
	ret = sps_transfer(info->sps.cmd_pipe.handle,
			&dma_buffer->xfer);
	if (ret) {
		pr_err("Failed to submit commands %d\n", ret);
		mutex_unlock(&info->bam_lock);
		goto free_dma;
	}
	wait_for_completion_io(&info->sps.cmd_pipe.completion);
	wait_for_completion_io(&info->sps.data_prod.completion);
	mutex_unlock(&info->bam_lock);

	/* Check for flash status errors */
	if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
		pr_err("MPU/OP err (0x%x) is set\n", dma_buffer->flash_status);
		ret = -EIO;
		goto free_dma;
	}

	for (crc_chk_count = 0; crc_chk_count < ONFI_PARAM_INFO_LENGTH
			/ ONFI_PARAM_PAGE_LENGTH; crc_chk_count++) {
		onfi_param_page_ptr =
			(struct onfi_param_page *)
			(&(onfi_param_info_buf
			[ONFI_PARAM_PAGE_LENGTH *
			crc_chk_count]));
		if (msm_nand_flash_onfi_crc_check(
			(uint8_t *)onfi_param_page_ptr,
			ONFI_PARAM_PAGE_LENGTH - 2) ==
			onfi_param_page_ptr->integrity_crc) {
			break;
		}
	}
	if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
			/ ONFI_PARAM_PAGE_LENGTH) {
		pr_err("CRC Check failed on param page\n");
		ret = -EIO;
		goto free_dma;
	}
	ret = msm_nand_flash_read_id(info, 0, &flash->flash_id);
	if (ret < 0) {
		pr_err("Failed to read flash ID\n");
		goto free_dma;
	}
	flash->widebus = onfi_param_page_ptr->features_supported & 0x01;
	flash->pagesize = onfi_param_page_ptr->number_of_data_bytes_per_page;
	flash->blksize = onfi_param_page_ptr->number_of_pages_per_block *
				flash->pagesize;
	flash->oobsize = onfi_param_page_ptr->number_of_spare_bytes_per_page;
	flash->density = onfi_param_page_ptr->number_of_blocks_per_logical_unit
				* flash->blksize;
	flash->ecc_correctability = onfi_param_page_ptr->
				number_of_bits_ecc_correctability;

	pr_info("Found an ONFI compliant device %s\n",
			onfi_param_page_ptr->device_model);
	/*
	 * Temporary hack for the MT29F4G08ABC device. Since the device
	 * does not properly adhere to the ONFI specification, it reports
	 * itself as a 16-bit device even though it is an 8-bit device.
	 */
	if (!strncmp(onfi_param_page_ptr->device_model, "MT29F4G08ABC", 12))
		flash->widebus = 0;
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
			ONFI_PARAM_INFO_LENGTH);
	return ret;
}

/*
 * Structure that contains read/write parameters required for reading/writing
 * from/to a page.
 */
struct msm_nand_rw_params {
	uint32_t page;
	uint32_t page_count;
	uint32_t sectordatasize;
	uint32_t sectoroobsize;
	uint32_t cwperpage;
	uint32_t oob_len_cmd;
	uint32_t oob_len_data;
	uint32_t start_sector;
	uint32_t oob_col;
	dma_addr_t data_dma_addr;
	dma_addr_t oob_dma_addr;
	dma_addr_t data_dma_addr_curr;
	dma_addr_t oob_dma_addr_curr;
	bool read;
};

/*
 * Structure that contains NANDc register data required for reading/writing
 * from/to a page.
 */
struct msm_nand_rw_reg_data {
	uint32_t cmd;
	uint32_t addr0;
	uint32_t addr1;
	uint32_t cfg0;
	uint32_t cfg1;
	uint32_t ecc_bch_cfg;
	uint32_t exec;
	uint32_t ecc_cfg;
	uint32_t clrfstatus;
	uint32_t clrrstatus;
};

/*
 * Function that validates page read/write MTD parameters received from upper
 * layers such as MTD/YAFFS2 and returns error for any unsupported operations
 * by the driver. In case of success, it also maps the data and oob buffers
 * received for DMA.
 */
static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read,
					loff_t offset,
					struct mtd_oob_ops *ops,
					struct msm_nand_rw_params *args)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	int err = 0;

	pr_debug("========================================================\n");
	pr_debug("offset 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x\n",
			offset, ops->mode, ops->datbuf, ops->len);
	pr_debug("oobbuf 0x%p ooblen 0x%x\n", ops->oobbuf, ops->ooblen);

	if (ops->mode == MTD_OPS_PLACE_OOB) {
		pr_err("MTD_OPS_PLACE_OOB is not supported\n");
		err = -EINVAL;
		goto out;
	}

	if (mtd->writesize == PAGE_SIZE_2K)
		args->page = offset >> 11;

	if (mtd->writesize == PAGE_SIZE_4K)
		args->page = offset >> 12;

	args->oob_len_cmd = ops->ooblen;
	args->oob_len_data = ops->ooblen;
	args->cwperpage = (mtd->writesize >> 9);
	args->read = (read ? true : false);

	if (offset & (mtd->writesize - 1)) {
		pr_err("unsupported offset 0x%llx\n", offset);
		err = -EINVAL;
		goto out;
	}

	if (!read && !ops->datbuf) {
		pr_err("No data buffer provided for write!\n");
		err = -EINVAL;
		goto out;
	}

	if (ops->mode == MTD_OPS_RAW) {
		if (!ops->datbuf) {
			pr_err("No data buffer provided for RAW mode\n");
			err = -EINVAL;
			goto out;
		} else if ((ops->len % (mtd->writesize +
				mtd->oobsize)) != 0) {
			pr_err("unsupported data len %d for RAW mode\n",
				ops->len);
			err = -EINVAL;
			goto out;
		}
		args->page_count = ops->len / (mtd->writesize + mtd->oobsize);

	} else if (ops->mode == MTD_OPS_AUTO_OOB) {
		if (ops->datbuf && (ops->len % mtd->writesize) != 0) {
			/* when ops->datbuf is NULL, ops->len can be ooblen */
			pr_err("unsupported data len %d for AUTO mode\n",
					ops->len);
			err = -EINVAL;
			goto out;
		}
		if (read && ops->oobbuf && !ops->datbuf) {
			args->start_sector = args->cwperpage - 1;
			args->page_count = ops->ooblen / mtd->oobavail;
			if ((args->page_count == 0) && (ops->ooblen))
				args->page_count = 1;
		} else if (ops->datbuf) {
			args->page_count = ops->len / mtd->writesize;
		}
	}

	if (ops->datbuf) {
		args->data_dma_addr_curr = args->data_dma_addr =
			msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
				(read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
		if (dma_mapping_error(chip->dev, args->data_dma_addr)) {
			pr_err("dma mapping failed for 0x%p\n", ops->datbuf);
			err = -EIO;
			goto out;
		}
	}
	if (ops->oobbuf) {
		if (read)
			memset(ops->oobbuf, 0xFF, ops->ooblen);
		args->oob_dma_addr_curr = args->oob_dma_addr =
			msm_nand_dma_map(chip->dev, ops->oobbuf, ops->ooblen,
				(read ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE));
		if (dma_mapping_error(chip->dev, args->oob_dma_addr)) {
			pr_err("dma mapping failed for 0x%p\n", ops->oobbuf);
			err = -EIO;
			goto dma_map_oobbuf_failed;
		}
	}
	goto out;
dma_map_oobbuf_failed:
	if (ops->datbuf)
		dma_unmap_page(chip->dev, args->data_dma_addr, ops->len,
				(read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
out:
	return err;
}

/*
 * Function that updates NANDc register data (struct msm_nand_rw_reg_data)
 * required for page read/write.
 */
static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip,
					struct mtd_oob_ops *ops,
					struct msm_nand_rw_params *args,
					struct msm_nand_rw_reg_data *data)
{
	if (args->read) {
		if (ops->mode != MTD_OPS_RAW) {
			data->cmd = MSM_NAND_CMD_PAGE_READ_ECC;
			data->cfg0 =
			(chip->cfg0 & ~(7U << CW_PER_PAGE)) |
			(((args->cwperpage-1) - args->start_sector)
			 << CW_PER_PAGE);
			data->cfg1 = chip->cfg1;
			data->ecc_bch_cfg = chip->ecc_bch_cfg;
		} else {
			data->cmd = MSM_NAND_CMD_PAGE_READ_ALL;
			data->cfg0 = chip->cfg0_raw;
			data->cfg1 = chip->cfg1_raw;
			data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
		}

	} else {
		if (ops->mode != MTD_OPS_RAW) {
			data->cfg0 = chip->cfg0;
			data->cfg1 = chip->cfg1;
			data->ecc_bch_cfg = chip->ecc_bch_cfg;
		} else {
			data->cfg0 = chip->cfg0_raw;
			data->cfg1 = chip->cfg1_raw;
			data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
		}
		data->cmd = MSM_NAND_CMD_PRG_PAGE;
		data->clrfstatus = MSM_NAND_RESET_FLASH_STS;
		data->clrrstatus = MSM_NAND_RESET_READ_STS;
	}
	data->exec = 1;
	data->ecc_cfg = chip->ecc_buf_cfg;
}

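/*
 * Worked example for the CW_PER_PAGE field programmed above: a 2K page
 * has cwperpage = 2048 >> 9 = 4 codewords, so a full-page ECC read with
 * start_sector = 0 writes (4 - 1 - 0) = 3 into bits [8:6] of CFG0. An
 * OOB-only read sets start_sector = cwperpage - 1, writes 0 instead,
 * and the controller then transfers just the last codeword.
 */
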
/*
 * Function to prepare a series of SPS command descriptors required for a
 * page read/write operation.
 */
static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops,
				struct msm_nand_rw_params *args,
				struct msm_nand_rw_reg_data *data,
				struct msm_nand_info *info,
				uint32_t curr_cw,
				struct msm_nand_sps_cmd **curr_cmd)
{
	struct msm_nand_chip *chip = &info->nand_chip;
	struct msm_nand_sps_cmd *cmd;
	uint32_t rdata;
	/* read_location register parameters */
	uint32_t offset, size, last_read;

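	/*
	 * Layout of the READ_LOCATION_n values built below (field widths
	 * inferred from the shifts used here, not from a register spec):
	 * rdata packs the byte offset within the codeword into bits
	 * [15:0], the number of bytes to copy out into bits [30:16], and
	 * a "last read" marker into bit 31. For example,
	 * (0 << 0) | (516 << 16) | (1 << 31) pulls one full 516-byte data
	 * chunk and marks it as the final read of the codeword.
	 */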
	cmd = *curr_cmd;
	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data->cmd,
			((curr_cw == args->start_sector) ?
			 SPS_IOVEC_FLAG_LOCK : 0));
	cmd++;

	if (curr_cw == args->start_sector) {
		msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
				data->addr0, 0);
		cmd++;

		msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE,
				data->addr1, 0);
		cmd++;

		msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE,
				data->cfg0, 0);
		cmd++;

		msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE,
				data->cfg1, 0);
		cmd++;

		msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
				data->ecc_bch_cfg, 0);
		cmd++;

		msm_nand_prep_ce(cmd, MSM_NAND_EBI2_ECC_BUF_CFG(info),
				WRITE, data->ecc_cfg, 0);
		cmd++;
	}

	if (!args->read)
		goto sub_exec_cmd;

	if (ops->mode == MTD_OPS_RAW) {
		rdata = (0 << 0) | (chip->cw_size << 16) | (1 << 31);
		msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
				rdata, 0);
		cmd++;
	}
	if (ops->mode == MTD_OPS_AUTO_OOB && ops->datbuf) {
		offset = 0;
		size = (curr_cw < (args->cwperpage - 1)) ? 516 :
			(512 - ((args->cwperpage - 1) << 2));
		last_read = (curr_cw < (args->cwperpage - 1)) ? 1 :
			(ops->oobbuf ? 0 : 1);
		rdata = (offset << 0) | (size << 16) | (last_read << 31);
		msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
				rdata, 0);
		cmd++;
	}
	if (ops->mode == MTD_OPS_AUTO_OOB && ops->oobbuf
			&& (curr_cw == (args->cwperpage - 1))) {
		offset = 512 - ((args->cwperpage - 1) << 2);
		size = (args->cwperpage) << 2;
		if (size > args->oob_len_cmd)
			size = args->oob_len_cmd;
		args->oob_len_cmd -= size;
		last_read = 1;
		rdata = (offset << 0) | (size << 16) | (last_read << 31);
		if (ops->datbuf) {
			msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_1(info),
					WRITE, rdata, 0);
		} else {
			msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info),
					WRITE, rdata, 0);
		}
		cmd++;
	}
sub_exec_cmd:
	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data->exec,
			SPS_IOVEC_FLAG_NWD);
	cmd++;
	*curr_cmd = cmd;
}

/*
 * Function to prepare and submit SPS data descriptors required for a page
 * read/write operation.
 */
static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops,
				struct msm_nand_rw_params *args,
				struct msm_nand_info *info,
				uint32_t curr_cw)
{
	struct msm_nand_chip *chip = &info->nand_chip;
	struct sps_pipe *data_pipe_handle;
	uint32_t sectordatasize, sectoroobsize;
	uint32_t sps_flags = 0;
	int err = 0;

	if (args->read)
		data_pipe_handle = info->sps.data_prod.handle;
	else
		data_pipe_handle = info->sps.data_cons.handle;

	if (ops->mode == MTD_OPS_RAW) {
		sectordatasize = chip->cw_size;
		if (!args->read)
			sps_flags = SPS_IOVEC_FLAG_EOT;
		if (curr_cw == (args->cwperpage - 1))
			sps_flags |= SPS_IOVEC_FLAG_INT;

		err = sps_transfer_one(data_pipe_handle,
				args->data_dma_addr_curr,
				sectordatasize, NULL,
				sps_flags);
		if (err)
			goto out;
		args->data_dma_addr_curr += sectordatasize;

	} else if (ops->mode == MTD_OPS_AUTO_OOB) {
		if (ops->datbuf) {
			sectordatasize = (curr_cw < (args->cwperpage - 1))
			? 516 : (512 - ((args->cwperpage - 1) << 2));

			if (!args->read) {
				sps_flags = SPS_IOVEC_FLAG_EOT;
				if (curr_cw == (args->cwperpage - 1) &&
						ops->oobbuf)
					sps_flags = 0;
			}
			if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf)
				sps_flags |= SPS_IOVEC_FLAG_INT;

			err = sps_transfer_one(data_pipe_handle,
					args->data_dma_addr_curr,
					sectordatasize, NULL,
					sps_flags);
			if (err)
				goto out;
			args->data_dma_addr_curr += sectordatasize;
		}

		if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) {
			sectoroobsize = args->cwperpage << 2;
			if (sectoroobsize > args->oob_len_data)
				sectoroobsize = args->oob_len_data;

			if (!args->read)
				sps_flags |= SPS_IOVEC_FLAG_EOT;
			sps_flags |= SPS_IOVEC_FLAG_INT;
			err = sps_transfer_one(data_pipe_handle,
					args->oob_dma_addr_curr,
					sectoroobsize, NULL,
					sps_flags);
			if (err)
				goto out;
			args->oob_dma_addr_curr += sectoroobsize;
			args->oob_len_data -= sectoroobsize;
		}
	}
out:
	return err;
}

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to read a
 * page with main and/or spare data.
 */
static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
			     struct mtd_oob_ops *ops)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	uint32_t cwperpage = (mtd->writesize >> 9);
	int err, pageerr = 0, rawerr = 0;
	uint32_t n = 0, pages_read = 0;
	uint32_t ecc_errors = 0, total_ecc_errors = 0;
	struct msm_nand_rw_params rw_params;
	struct msm_nand_rw_reg_data data;
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct sps_iovec *iovec;
	/*
	 * The following 6 commands will be sent only once for the first
	 * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
	 * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The following 6 commands will
	 * be sent for every CW - flash, read_location_0, read_location_1,
	 * exec, flash_status and buffer_status.
	 */
	uint32_t total_cnt = (6 * cwperpage) + 6;
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[total_cnt];
		struct msm_nand_sps_cmd cmd[total_cnt];
		struct {
			uint32_t flash_status;
			uint32_t buffer_status;
		} result[cwperpage];
	} *dma_buffer;

	memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
	err = msm_nand_validate_mtd_params(mtd, true, from, ops, &rw_params);
	if (err)
		goto validate_mtd_params_failed;

	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
			    chip, sizeof(*dma_buffer))));

	rw_params.oob_col = rw_params.start_sector * chip->cw_size;
	if (chip->cfg1 & (1 << WIDE_FLASH))
		rw_params.oob_col >>= 1;

	memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
	msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);

	while (rw_params.page_count-- > 0) {
		data.addr0 = (rw_params.page << 16) | rw_params.oob_col;
		data.addr1 = (rw_params.page >> 16) & 0xff;
		cmd = dma_buffer->cmd;
		for (n = rw_params.start_sector; n < cwperpage; n++) {
			dma_buffer->result[n].flash_status = 0xeeeeeeee;
			dma_buffer->result[n].buffer_status = 0xeeeeeeee;

			curr_cmd = cmd;
			msm_nand_prep_rw_cmd_desc(ops, &rw_params,
					&data, info, n, &curr_cmd);

			cmd = curr_cmd;
			msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
				READ, msm_virt_to_dma(chip,
				&dma_buffer->result[n].flash_status), 0);
			cmd++;

			msm_nand_prep_ce(cmd, MSM_NAND_BUFFER_STATUS(info),
				READ, msm_virt_to_dma(chip,
				&dma_buffer->result[n].buffer_status),
				((n == (cwperpage - 1)) ?
				(SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT) :
				0));
			cmd++;
		}

		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
		dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
		dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
		dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
						&dma_buffer->cmd_iovec);
		iovec = dma_buffer->xfer.iovec;

		for (n = 0; n < dma_buffer->xfer.iovec_count; n++) {
			iovec->addr = msm_virt_to_dma(chip,
					&dma_buffer->cmd[n].ce);
			iovec->size = sizeof(struct sps_command_element);
			iovec->flags = dma_buffer->cmd[n].flags;
			iovec++;
		}
		mutex_lock(&info->bam_lock);
		/* Submit data descriptors */
		for (n = rw_params.start_sector; n < cwperpage; n++) {
			err = msm_nand_submit_rw_data_desc(ops,
						&rw_params, info, n);
			if (err) {
				pr_err("Failed to submit data descs %d\n", err);
				mutex_unlock(&info->bam_lock);
				goto free_dma;
			}
		}
		/* Submit command descriptors */
		err = sps_transfer(info->sps.cmd_pipe.handle,
				&dma_buffer->xfer);
		if (err) {
			pr_err("Failed to submit commands %d\n", err);
			mutex_unlock(&info->bam_lock);
			goto free_dma;
		}
		wait_for_completion_io(&info->sps.cmd_pipe.completion);
		wait_for_completion_io(&info->sps.data_prod.completion);
		mutex_unlock(&info->bam_lock);
		/* Check for flash status errors */
		pageerr = rawerr = 0;
		for (n = rw_params.start_sector; n < cwperpage; n++) {
			if (dma_buffer->result[n].flash_status & (FS_OP_ERR |
					FS_MPU_ERR)) {
				rawerr = -EIO;
				break;
			}
		}
		/* Check for ECC correction on empty block */
		if (rawerr && ops->datbuf && ops->mode != MTD_OPS_RAW) {
			uint8_t *datbuf = ops->datbuf +
				pages_read * mtd->writesize;

			dma_sync_single_for_cpu(chip->dev,
			rw_params.data_dma_addr_curr - mtd->writesize,
			mtd->writesize, DMA_BIDIRECTIONAL);

			for (n = 0; n < mtd->writesize; n++) {
				/* TODO: check offset for 4bit BCHECC */
				if ((n % 516 == 3 || n % 516 == 175)
						&& datbuf[n] == 0x54)
					datbuf[n] = 0xff;
				if (datbuf[n] != 0xff) {
					pageerr = rawerr;
					break;
				}
			}

			dma_sync_single_for_device(chip->dev,
			rw_params.data_dma_addr_curr - mtd->writesize,
			mtd->writesize, DMA_BIDIRECTIONAL);
		}
		if (rawerr && ops->oobbuf) {
			dma_sync_single_for_cpu(chip->dev,
			rw_params.oob_dma_addr_curr - (ops->ooblen -
			rw_params.oob_len_data),
			ops->ooblen - rw_params.oob_len_data,
			DMA_BIDIRECTIONAL);

			for (n = 0; n < ops->ooblen; n++) {
				if (ops->oobbuf[n] != 0xff) {
					pageerr = rawerr;
					break;
				}
			}

			dma_sync_single_for_device(chip->dev,
			rw_params.oob_dma_addr_curr - (ops->ooblen -
			rw_params.oob_len_data),
			ops->ooblen - rw_params.oob_len_data,
			DMA_BIDIRECTIONAL);
		}
		/* check for uncorrectable errors */
		if (pageerr) {
			for (n = rw_params.start_sector; n < cwperpage; n++) {
				if (dma_buffer->result[n].buffer_status &
						BS_UNCORRECTABLE_BIT) {
					mtd->ecc_stats.failed++;
					pageerr = -EBADMSG;
					break;
				}
			}
		}
		/* check for correctable errors */
		if (!rawerr) {
			for (n = rw_params.start_sector; n < cwperpage; n++) {
				ecc_errors =
					dma_buffer->result[n].buffer_status
					& BS_CORRECTABLE_ERR_MSK;
				if (ecc_errors) {
					total_ecc_errors += ecc_errors;
					mtd->ecc_stats.corrected += ecc_errors;
					/*
					 * For Micron devices it is observed
					 * that correctable errors up to 3
					 * bits are very common.
					 */
					if (ecc_errors > 3)
						pageerr = -EUCLEAN;
				}
			}
		}
		if (pageerr && (pageerr != -EUCLEAN || err == 0))
			err = pageerr;

		if (rawerr && !pageerr) {
			pr_debug("%llx %x %x empty page\n",
				(loff_t)rw_params.page * mtd->writesize,
				ops->len, ops->ooblen);
		} else {
			for (n = rw_params.start_sector; n < cwperpage; n++)
				pr_debug("cw %d: flash_sts %x buffr_sts %x\n",
				n, dma_buffer->result[n].flash_status,
				dma_buffer->result[n].buffer_status);
		}
		if (err && err != -EUCLEAN && err != -EBADMSG)
			goto free_dma;
		pages_read++;
		rw_params.page++;
	}
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	if (ops->oobbuf)
		dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
				ops->ooblen, DMA_FROM_DEVICE);
	if (ops->datbuf)
		dma_unmap_page(chip->dev, rw_params.data_dma_addr,
				ops->len, DMA_BIDIRECTIONAL);
validate_mtd_params_failed:
	if (ops->mode != MTD_OPS_RAW)
		ops->retlen = mtd->writesize * pages_read;
	else
		ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read;
	ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
	if (err)
		pr_err("0x%llx datalen 0x%x ooblen %x err %d corrected %d\n",
			from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
			total_ecc_errors);
	pr_debug("ret %d, retlen %d oobretlen %d\n",
			err, ops->retlen, ops->oobretlen);

	pr_debug("========================================================\n");
	return err;
}

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to read a
 * page with only main data.
 */
static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	int ret;
	struct mtd_oob_ops ops;

	ops.mode = MTD_OPS_PLACE_OOB;
	ops.len = len;
	ops.retlen = 0;
	ops.ooblen = 0;
	ops.datbuf = buf;
	ops.oobbuf = NULL;
	ret = msm_nand_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;
	return ret;
}

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to write a
 * page with both main and spare data.
 */
static int msm_nand_write_oob(struct mtd_info *mtd, loff_t to,
			      struct mtd_oob_ops *ops)
{
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	uint32_t cwperpage = (mtd->writesize >> 9);
	uint32_t n, flash_sts, pages_written = 0;
	int err = 0;
	struct msm_nand_rw_params rw_params;
	struct msm_nand_rw_reg_data data;
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct sps_iovec *iovec;
	/*
	 * The following 7 commands will be sent only once:
	 * For the first codeword (CW) - addr0, addr1, dev0_cfg0,
	 * dev0_cfg1, dev0_ecc_cfg, ebi2_ecc_buf_cfg.
	 * For the last codeword (CW) - read_status (write).
	 *
	 * The following 4 commands will be sent for every CW:
	 * flash, exec, flash_status (read), flash_status (write).
	 */
	uint32_t total_cnt = (4 * cwperpage) + 7;
	struct {
		struct sps_transfer xfer;
		struct sps_iovec cmd_iovec[total_cnt];
		struct msm_nand_sps_cmd cmd[total_cnt];
		struct {
			uint32_t flash_status[cwperpage];
		} data;
	} *dma_buffer;

	memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
	err = msm_nand_validate_mtd_params(mtd, false, to, ops, &rw_params);
	if (err)
		goto validate_mtd_params_failed;

	wait_event(chip->dma_wait_queue, (dma_buffer =
			msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));

	memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
	msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);

	while (rw_params.page_count-- > 0) {
		data.addr0 = (rw_params.page << 16);
		data.addr1 = (rw_params.page >> 16) & 0xff;
		cmd = dma_buffer->cmd;

		for (n = 0; n < cwperpage; n++) {
			dma_buffer->data.flash_status[n] = 0xeeeeeeee;

			curr_cmd = cmd;
			msm_nand_prep_rw_cmd_desc(ops, &rw_params,
					&data, info, n, &curr_cmd);

			cmd = curr_cmd;
			msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
				READ, msm_virt_to_dma(chip,
				&dma_buffer->data.flash_status[n]), 0);
			cmd++;

			msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
				WRITE, data.clrfstatus, 0);
			cmd++;

			if (n == (cwperpage - 1)) {
				msm_nand_prep_ce(cmd,
					MSM_NAND_READ_STATUS(info), WRITE,
					data.clrrstatus, SPS_IOVEC_FLAG_UNLOCK
					| SPS_IOVEC_FLAG_INT);
				cmd++;
			}
		}

		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
		dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
		dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
		dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
						&dma_buffer->cmd_iovec);
		iovec = dma_buffer->xfer.iovec;

		for (n = 0; n < dma_buffer->xfer.iovec_count; n++) {
			iovec->addr = msm_virt_to_dma(chip,
					&dma_buffer->cmd[n].ce);
			iovec->size = sizeof(struct sps_command_element);
			iovec->flags = dma_buffer->cmd[n].flags;
			iovec++;
		}
		mutex_lock(&info->bam_lock);
		/* Submit data descriptors */
		for (n = 0; n < cwperpage; n++) {
			err = msm_nand_submit_rw_data_desc(ops,
						&rw_params, info, n);
			if (err) {
				pr_err("Failed to submit data descs %d\n", err);
				mutex_unlock(&info->bam_lock);
				goto free_dma;
			}
		}
		/* Submit command descriptors */
		err = sps_transfer(info->sps.cmd_pipe.handle,
				&dma_buffer->xfer);
		if (err) {
			pr_err("Failed to submit commands %d\n", err);
			mutex_unlock(&info->bam_lock);
			goto free_dma;
		}
		wait_for_completion_io(&info->sps.cmd_pipe.completion);
		wait_for_completion_io(&info->sps.data_cons.completion);
		mutex_unlock(&info->bam_lock);

		for (n = 0; n < cwperpage; n++)
			pr_debug("write pg %d: flash_status[%d] = %x\n",
				rw_params.page, n,
				dma_buffer->data.flash_status[n]);

		/* Check for flash status errors */
		for (n = 0; n < cwperpage; n++) {
			flash_sts = dma_buffer->data.flash_status[n];
			if (flash_sts & (FS_OP_ERR | FS_MPU_ERR)) {
				pr_err("MPU/OP err (0x%x) set\n", flash_sts);
				err = -EIO;
				goto free_dma;
			}
			if (n == (cwperpage - 1)) {
				if (!(flash_sts & FS_DEVICE_WP) ||
					(flash_sts & FS_DEVICE_STS_ERR)) {
					pr_err("Dev sts err 0x%x\n", flash_sts);
					err = -EIO;
					goto free_dma;
				}
			}
		}
		pages_written++;
		rw_params.page++;
	}
free_dma:
	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
	if (ops->oobbuf)
		dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
				ops->ooblen, DMA_TO_DEVICE);
	if (ops->datbuf)
		dma_unmap_page(chip->dev, rw_params.data_dma_addr,
				ops->len, DMA_TO_DEVICE);
validate_mtd_params_failed:
	if (ops->mode != MTD_OPS_RAW)
		ops->retlen = mtd->writesize * pages_written;
	else
		ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;

	ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
	if (err)
		pr_err("to %llx datalen %x ooblen %x failed with err %d\n",
			to, ops->len, ops->ooblen, err);
	pr_debug("ret %d, retlen %d oobretlen %d\n",
			err, ops->retlen, ops->oobretlen);

	pr_debug("================================================\n");
	return err;
}

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to write a
 * page with only main data.
 */
static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	int ret;
	struct mtd_oob_ops ops;

	ops.mode = MTD_OPS_PLACE_OOB;
	ops.len = len;
	ops.retlen = 0;
	ops.ooblen = 0;
	ops.datbuf = (uint8_t *)buf;
	ops.oobbuf = NULL;
	ret = msm_nand_write_oob(mtd, to, &ops);
	*retlen = ops.retlen;
	return ret;
}

/*
 * Structure that contains NANDc register data for commands required
 * for Erase operation.
 */
struct msm_nand_erase_reg_data {
	struct msm_nand_common_cfgs cfg;
	uint32_t exec;
	uint32_t flash_status;
	uint32_t clrfstatus;
	uint32_t clrrstatus;
};

/*
 * Function that gets called from upper layers such as MTD/YAFFS2 to erase a
 * block within the NAND device.
 */
static int msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	int i, err = 0;
	struct msm_nand_info *info = mtd->priv;
	struct msm_nand_chip *chip = &info->nand_chip;
	uint32_t page = 0;
	struct msm_nand_sps_cmd *cmd, *curr_cmd;
	struct msm_nand_erase_reg_data data;
	struct sps_iovec *iovec;
	uint32_t total_cnt = 9;
	/*
	 * The following 9 commands are required to erase a block -
	 * flash, addr0, addr1, cfg0, cfg1, exec, flash_status (read),
	 * flash_status (write), read_status.
	 */
1688 struct {
1689 struct sps_transfer xfer;
1690 struct sps_iovec cmd_iovec[total_cnt];
1691 struct msm_nand_sps_cmd cmd[total_cnt];
1692 uint32_t flash_status;
1693 } *dma_buffer;
1694
1695 if (mtd->writesize == PAGE_SIZE_2K)
1696 page = instr->addr >> 11;
1697
1698 if (mtd->writesize == PAGE_SIZE_4K)
1699 page = instr->addr >> 12;
1700
1701 if (instr->addr & (mtd->erasesize - 1)) {
1702 pr_err("unsupported erase address, 0x%llx\n", instr->addr);
1703 err = -EINVAL;
1704 goto out;
1705 }
1706 if (instr->len != mtd->erasesize) {
1707 pr_err("unsupported erase len, %lld\n", instr->len);
1708 err = -EINVAL;
1709 goto out;
1710 }
1711
1712 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
1713 chip, sizeof(*dma_buffer))));
1714 cmd = dma_buffer->cmd;
1715
1716 memset(&data, 0, sizeof(struct msm_nand_erase_reg_data));
1717 data.cfg.cmd = MSM_NAND_CMD_BLOCK_ERASE;
1718 data.cfg.addr0 = page;
1719 data.cfg.addr1 = 0;
1720 data.cfg.cfg0 = chip->cfg0 & (~(7 << CW_PER_PAGE));
1721 data.cfg.cfg1 = chip->cfg1;
1722 data.exec = 1;
1723 dma_buffer->flash_status = 0xeeeeeeee;
1724 data.clrfstatus = MSM_NAND_RESET_FLASH_STS;
1725 data.clrrstatus = MSM_NAND_RESET_READ_STS;
1726
1727 curr_cmd = cmd;
1728 msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
1729
1730 cmd = curr_cmd;
1731 msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data.exec,
1732 SPS_IOVEC_FLAG_NWD);
1733 cmd++;
1734
1735 msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
1736 msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
1737 cmd++;
1738
1739 msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), WRITE,
1740 data.clrfstatus, 0);
1741 cmd++;
1742
1743 msm_nand_prep_ce(cmd, MSM_NAND_READ_STATUS(info), WRITE,
1744 data.clrrstatus,
1745 SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
1746 cmd++;
1747
1748 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
1749 dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
1750 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
1751 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
1752 &dma_buffer->cmd_iovec);
1753 iovec = dma_buffer->xfer.iovec;
1754
1755 for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
1756 iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
1757 iovec->size = sizeof(struct sps_command_element);
1758 iovec->flags = dma_buffer->cmd[i].flags;
1759 iovec++;
1760 }
1761 mutex_lock(&info->bam_lock);
1762 err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
1763 if (err) {
1764 pr_err("Failed to submit commands %d\n", err);
1765 mutex_unlock(&info->bam_lock);
1766 goto free_dma;
1767 }
1768 wait_for_completion_io(&info->sps.cmd_pipe.completion);
1769 mutex_unlock(&info->bam_lock);
1770
1771 /* Check for flash status errors */
1772 if (dma_buffer->flash_status & (FS_OP_ERR |
1773 FS_MPU_ERR | FS_DEVICE_STS_ERR)) {
1774 pr_err("MPU/OP/DEV err (0x%x) set\n", dma_buffer->flash_status);
1775 err = -EIO;
1776 }
1777 if (!(dma_buffer->flash_status & FS_DEVICE_WP)) {
1778 pr_err("Device is write protected\n");
1779 err = -EIO;
1780 }
1781 if (err) {
1782 pr_err("Erase failed, 0x%llx\n", instr->addr);
1783 instr->fail_addr = instr->addr;
1784 instr->state = MTD_ERASE_FAILED;
1785 } else {
1786 instr->state = MTD_ERASE_DONE;
1787 instr->fail_addr = 0xffffffff;
1788 mtd_erase_callback(instr);
1789 }
1790free_dma:
1791 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
1792out:
1793 return err;
1794}
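
/*
 * A minimal refactoring sketch (the helper is hypothetical): both the
 * writesize checks above and the ones in msm_nand_block_isbad() below
 * reduce a byte address to a page index, i.e. addr >> 11 for 2K pages and
 * addr >> 12 for 4K pages. For example, with 2K pages the block at address
 * 0x20000 starts at page 0x20000 >> 11 = 64.
 */
static inline uint32_t example_addr_to_page(loff_t addr, uint32_t writesize)
{
	return (writesize == PAGE_SIZE_4K) ?
		(uint32_t)(addr >> 12) : (uint32_t)(addr >> 11);
}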
1795
1796/*
1797 * Structure that contains NANDc register data for commands required
1798 * for checking if a block is bad.
1799 */
1800struct msm_nand_blk_isbad_data {
1801 struct msm_nand_common_cfgs cfg;
1802 uint32_t ecc_bch_cfg;
1803 uint32_t exec;
1804 uint32_t read_offset;
1805};
1806
1807/*
1808 * Function that gets called from upper layers such as MTD/YAFFS2 to check if
1809 * a block is bad. This is done by reading the first page within a block and
1810 * checking whether the bad block byte location contains 0xFF or not. If it
1811 * doesn't contain 0xFF, then it is considered as bad block.
1812 */
1813static int msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
1814{
1815 struct msm_nand_info *info = mtd->priv;
1816 struct msm_nand_chip *chip = &info->nand_chip;
1817 int i, ret = 0, bad_block = 0;
1818 uint8_t *buf;
1819 uint32_t page = 0, rdata, cwperpage;
1820 struct msm_nand_sps_cmd *cmd, *curr_cmd;
1821 struct msm_nand_blk_isbad_data data;
1822 struct sps_iovec *iovec;
1823 uint32_t total_cnt = 9;
1824 /*
1825	 * The following 9 commands are required to check if a block is bad -
1826 * flash, addr0, addr1, cfg0, cfg1, ecc_cfg, read_loc_0,
1827 * exec, flash_status(read).
1828 */
1829 struct {
1830 struct sps_transfer xfer;
1831 struct sps_iovec cmd_iovec[total_cnt];
1832 struct msm_nand_sps_cmd cmd[total_cnt];
1833 uint32_t flash_status;
1834 } *dma_buffer;
1835
1836 if (mtd->writesize == PAGE_SIZE_2K)
1837 page = ofs >> 11;
1838
1839 if (mtd->writesize == PAGE_SIZE_4K)
1840 page = ofs >> 12;
1841
1842 cwperpage = (mtd->writesize >> 9);
1843
1844 if (ofs > mtd->size) {
1845 pr_err("Invalid offset 0x%llx\n", ofs);
1846 bad_block = -EINVAL;
1847 goto out;
1848 }
1849 if (ofs & (mtd->erasesize - 1)) {
1850 pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
1851 bad_block = -EINVAL;
1852 goto out;
1853 }
1854
1855 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
1856			chip, sizeof(*dma_buffer) + 4)));
1857 buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
1858
1859 cmd = dma_buffer->cmd;
1860	memset(&data, 0, sizeof(struct msm_nand_blk_isbad_data));
1861 data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
1862 data.cfg.cfg0 = chip->cfg0_raw & ~(7U << CW_PER_PAGE);
1863 data.cfg.cfg1 = chip->cfg1_raw;
1864
1865 if (chip->cfg1 & (1 << WIDE_FLASH))
1866 data.cfg.addr0 = (page << 16) |
1867 ((chip->cw_size * (cwperpage-1)) >> 1);
1868 else
1869 data.cfg.addr0 = (page << 16) |
1870 (chip->cw_size * (cwperpage-1));
1871
1872 data.cfg.addr1 = (page >> 16) & 0xff;
1873 data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
1874 data.exec = 1;
1875 data.read_offset = (mtd->writesize - (chip->cw_size * (cwperpage-1)));
1876 dma_buffer->flash_status = 0xeeeeeeee;
1877
1878 curr_cmd = cmd;
1879 msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
1880
1881 cmd = curr_cmd;
1882 msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
1883 data.ecc_bch_cfg, 0);
1884 cmd++;
1885
1886 rdata = (data.read_offset << 0) | (4 << 16) | (1 << 31);
1887 msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE, rdata, 0);
1888 cmd++;
1889
1890 msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
1891 data.exec, SPS_IOVEC_FLAG_NWD);
1892 cmd++;
1893
1894 msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
1895 msm_virt_to_dma(chip, &dma_buffer->flash_status),
1896 SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_UNLOCK);
1897 cmd++;
1898
1899 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
1900 dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
1901 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
1902 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
1903 &dma_buffer->cmd_iovec);
1904 iovec = dma_buffer->xfer.iovec;
1905
1906 for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
1907 iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
1908 iovec->size = sizeof(struct sps_command_element);
1909 iovec->flags = dma_buffer->cmd[i].flags;
1910 iovec++;
1911 }
1912 mutex_lock(&info->bam_lock);
1913 /* Submit data descriptor */
1914 ret = sps_transfer_one(info->sps.data_prod.handle,
1915 msm_virt_to_dma(chip, buf),
1916 4, NULL, SPS_IOVEC_FLAG_INT);
1917
1918 if (ret) {
1919 pr_err("Failed to submit data desc %d\n", ret);
1920 mutex_unlock(&info->bam_lock);
1921 goto free_dma;
1922 }
1923 /* Submit command descriptor */
1924 ret = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
1925 if (ret) {
1926 pr_err("Failed to submit commands %d\n", ret);
1927 mutex_unlock(&info->bam_lock);
1928 goto free_dma;
1929 }
1930 wait_for_completion_io(&info->sps.cmd_pipe.completion);
1931 wait_for_completion_io(&info->sps.data_prod.completion);
1932 mutex_unlock(&info->bam_lock);
1933
1934 /* Check for flash status errors */
1935 if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
1936 pr_err("MPU/OP err set: %x\n", dma_buffer->flash_status);
1937 bad_block = -EIO;
1938 goto free_dma;
1939 }
1940
1941 /* Check for bad block marker byte */
1942 if (chip->cfg1 & (1 << WIDE_FLASH)) {
1943 if (buf[0] != 0xFF || buf[1] != 0xFF)
1944 bad_block = 1;
1945 } else {
1946 if (buf[0] != 0xFF)
1947 bad_block = 1;
1948 }
1949free_dma:
1950 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
1951out:
1952 return ret ? ret : bad_block;
1953}
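
/*
 * Worked example for the read_offset used above (hypothetical helper): the
 * raw page lays out cwperpage codewords of cw_size bytes each, so the last
 * codeword starts at cw_size * (cwperpage - 1). Reading 4 bytes at offset
 * writesize - cw_size * (cwperpage - 1) within that codeword lands exactly
 * at raw byte `writesize`, i.e. the first spare bytes holding the factory
 * bad block marker. For a 2K page with cw_size 528 and cwperpage 4 that is
 * 2048 - 528 * 3 = 464; for a 4K page (cwperpage 8) it is
 * 4096 - 528 * 7 = 400.
 */
static inline uint32_t example_bbm_read_offset(uint32_t writesize,
					       uint32_t cw_size)
{
	uint32_t cwperpage = writesize >> 9;

	return writesize - (cw_size * (cwperpage - 1));
}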
1954
1955/*
1956 * Function that gets called from upper layers such as MTD/YAFFS2 to mark a
1957 * block as bad. This is done by writing zeros to the first page within the
1958 * block, which also sets the bad block byte location to 0.
1959 */
1960static int msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
1961{
1962 struct mtd_oob_ops ops;
1963 int ret;
1964 uint8_t *buf;
1965 size_t len;
1966
1967 if (ofs > mtd->size) {
1968 pr_err("Invalid offset 0x%llx\n", ofs);
1969 ret = -EINVAL;
1970 goto out;
1971 }
1972 if (ofs & (mtd->erasesize - 1)) {
1973 pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
1974 ret = -EINVAL;
1975 goto out;
1976 }
1977 len = mtd->writesize + mtd->oobsize;
1978 buf = kzalloc(len, GFP_KERNEL);
1979 if (!buf) {
1980		pr_err("unable to allocate memory for 0x%zx size\n", len);
1981 ret = -ENOMEM;
1982 goto out;
1983 }
1984 ops.mode = MTD_OPS_RAW;
1985 ops.len = len;
1986 ops.retlen = 0;
1987 ops.ooblen = 0;
1988 ops.datbuf = buf;
1989 ops.oobbuf = NULL;
1990 ret = msm_nand_write_oob(mtd, ofs, &ops);
1991 kfree(buf);
1992out:
1993 return ret;
1994}
1995
1996/*
1997 * Function that scans for the attached NAND device. This fills out all
1998 * the uninitialized function pointers with the defaults. The flash ID is
1999 * read and the mtd/chip structures are filled with the appropriate values.
2000 */
2001int msm_nand_scan(struct mtd_info *mtd)
2002{
2003 struct msm_nand_info *info = mtd->priv;
2004 struct msm_nand_chip *chip = &info->nand_chip;
2005 struct flash_identification *supported_flash = &info->flash_dev;
2006 int flash_id = 0, err = 0;
2007 uint32_t i, mtd_writesize;
2008 uint8_t dev_found = 0, wide_bus;
2009 uint32_t manid, devid, devcfg;
2010 uint32_t bad_block_byte;
2011 struct nand_flash_dev *flashdev = NULL;
2012 struct nand_manufacturers *flashman = NULL;
2013
2014 /* Probe the Flash device for ONFI compliance */
2015 if (!msm_nand_flash_onfi_probe(info)) {
2016 dev_found = 1;
2017 } else {
2018 err = msm_nand_flash_read_id(info, 0, &flash_id);
2019 if (err < 0) {
2020 pr_err("Failed to read Flash ID\n");
2021 err = -EINVAL;
2022 goto out;
2023 }
2024 manid = flash_id & 0xFF;
2025 devid = (flash_id >> 8) & 0xFF;
2026 devcfg = (flash_id >> 24) & 0xFF;
2027
2028 for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
2029 if (nand_manuf_ids[i].id == manid)
2030 flashman = &nand_manuf_ids[i];
2031 for (i = 0; !flashdev && nand_flash_ids[i].id; ++i)
2032 if (nand_flash_ids[i].id == devid)
2033 flashdev = &nand_flash_ids[i];
2034 if (!flashdev || !flashman) {
2035 pr_err("unknown nand flashid=%x manuf=%x devid=%x\n",
2036 flash_id, manid, devid);
2037 err = -ENOENT;
2038 goto out;
2039 }
2040 dev_found = 1;
2041 if (!flashdev->pagesize) {
2042 supported_flash->widebus = devcfg & (1 << 6) ? 1 : 0;
2043 supported_flash->pagesize = 1024 << (devcfg & 0x3);
2044 supported_flash->blksize = (64 * 1024) <<
2045 ((devcfg >> 4) & 0x3);
2046 supported_flash->oobsize = (8 << ((devcfg >> 2) & 1)) *
2047 (supported_flash->pagesize >> 9);
2048 } else {
2049 supported_flash->widebus = flashdev->options &
2050 NAND_BUSWIDTH_16 ? 1 : 0;
2051 supported_flash->pagesize = flashdev->pagesize;
2052 supported_flash->blksize = flashdev->erasesize;
2053 supported_flash->oobsize = flashdev->pagesize >> 5;
2054 }
2055 supported_flash->flash_id = flash_id;
2056 supported_flash->density = flashdev->chipsize << 20;
2057 }
2058
2059 if (dev_found) {
2060 wide_bus = supported_flash->widebus;
2061 mtd->size = supported_flash->density;
2062 mtd->writesize = supported_flash->pagesize;
2063 mtd->oobsize = supported_flash->oobsize;
2064 mtd->erasesize = supported_flash->blksize;
2065 mtd_writesize = mtd->writesize;
2066
2067		/* Check whether the NAND device supports 8-bit ECC */
2068 if (supported_flash->ecc_correctability >= 8)
2069 chip->bch_caps = MSM_NAND_CAP_8_BIT_BCH;
2070 else
2071 chip->bch_caps = MSM_NAND_CAP_4_BIT_BCH;
2072
2073 pr_info("NAND Id: 0x%x Buswidth: %dBits Density: %lld MByte\n",
2074 supported_flash->flash_id, (wide_bus) ? 16 : 8,
2075 (mtd->size >> 20));
2076 pr_info("pagesize: %d Erasesize: %d oobsize: %d (in Bytes)\n",
2077 mtd->writesize, mtd->erasesize, mtd->oobsize);
2078 pr_info("BCH ECC: %d Bit\n",
2079 (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH ? 8 : 4));
2080 }
2081
2082 chip->cw_size = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? 532 : 528;
2083 chip->cfg0 = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
2084 | (516 << UD_SIZE_BYTES)
2085 | (0 << DISABLE_STATUS_AFTER_WRITE)
2086 | (5 << NUM_ADDR_CYCLES);
2087
2088 bad_block_byte = (mtd_writesize - (chip->cw_size * (
2089 (mtd_writesize >> 9) - 1)) + 1);
2090 chip->cfg1 = (7 << NAND_RECOVERY_CYCLES)
2091 | (0 << CS_ACTIVE_BSY)
2092 | (bad_block_byte << BAD_BLOCK_BYTE_NUM)
2093 | (0 << BAD_BLOCK_IN_SPARE_AREA)
2094 | (2 << WR_RD_BSY_GAP)
2095 | ((wide_bus ? 1 : 0) << WIDE_FLASH)
2096 | (1 << ENABLE_BCH_ECC);
2097
2098 chip->cfg0_raw = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
2099 | (5 << NUM_ADDR_CYCLES)
2100 | (0 << SPARE_SIZE_BYTES)
2101 | (chip->cw_size << UD_SIZE_BYTES);
2102
2103 chip->cfg1_raw = (7 << NAND_RECOVERY_CYCLES)
2104 | (0 << CS_ACTIVE_BSY)
2105 | (17 << BAD_BLOCK_BYTE_NUM)
2106 | (1 << BAD_BLOCK_IN_SPARE_AREA)
2107 | (2 << WR_RD_BSY_GAP)
2108 | ((wide_bus ? 1 : 0) << WIDE_FLASH)
2109 | (1 << DEV0_CFG1_ECC_DISABLE);
2110
2111 chip->ecc_bch_cfg = (0 << ECC_CFG_ECC_DISABLE)
2112 | (0 << ECC_SW_RESET)
2113 | (516 << ECC_NUM_DATA_BYTES)
2114 | (1 << ECC_FORCE_CLK_OPEN);
2115
2116 if (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) {
2117 chip->cfg0 |= (wide_bus ? 0 << SPARE_SIZE_BYTES :
2118 2 << SPARE_SIZE_BYTES);
2119 chip->ecc_bch_cfg |= (1 << ECC_MODE)
2120 | ((wide_bus) ? (14 << ECC_PARITY_SIZE_BYTES) :
2121 (13 << ECC_PARITY_SIZE_BYTES));
2122 } else {
2123 chip->cfg0 |= (wide_bus ? 2 << SPARE_SIZE_BYTES :
2124 4 << SPARE_SIZE_BYTES);
2125 chip->ecc_bch_cfg |= (0 << ECC_MODE)
2126 | ((wide_bus) ? (8 << ECC_PARITY_SIZE_BYTES) :
2127 (7 << ECC_PARITY_SIZE_BYTES));
2128 }
2129
2130 /*
2131 * For 4bit BCH ECC (default ECC), parity bytes = 7(x8) or 8(x16 I/O)
2132 * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
2133 */
2134 chip->ecc_parity_bytes = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ?
2135 (wide_bus ? 14 : 13) : (wide_bus ? 8 : 7);
2136	chip->ecc_buf_cfg = 0x203; /* 0x203 = 515 = (516 - 1) bytes covered by ECC */
2137
2138 pr_info("CFG0: 0x%08x, CFG1: 0x%08x\n"
2139 " RAWCFG0: 0x%08x, RAWCFG1: 0x%08x\n"
2140 " ECCBUFCFG: 0x%08x, ECCBCHCFG: 0x%08x\n"
2141 " BAD BLOCK BYTE: 0x%08x\n", chip->cfg0, chip->cfg1,
2142 chip->cfg0_raw, chip->cfg1_raw, chip->ecc_buf_cfg,
2143 chip->ecc_bch_cfg, bad_block_byte);
2144
2145 if (mtd->oobsize == 64) {
2146 mtd->oobavail = 16;
2147 } else if ((mtd->oobsize == 128) || (mtd->oobsize == 224)) {
2148 mtd->oobavail = 32;
2149 } else {
2150 pr_err("Unsupported NAND oobsize: 0x%x\n", mtd->oobsize);
2151 err = -ENODEV;
2152 goto out;
2153 }
2154
2155 /* Fill in remaining MTD driver data */
2156 mtd->type = MTD_NANDFLASH;
2157 mtd->flags = MTD_CAP_NANDFLASH;
2158 mtd->_erase = msm_nand_erase;
2159 mtd->_block_isbad = msm_nand_block_isbad;
2160 mtd->_block_markbad = msm_nand_block_markbad;
2161 mtd->_read = msm_nand_read;
2162 mtd->_write = msm_nand_write;
2163 mtd->_read_oob = msm_nand_read_oob;
2164 mtd->_write_oob = msm_nand_write_oob;
2165 mtd->owner = THIS_MODULE;
2166out:
2167 return err;
2168}
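
/*
 * Hedged sketch of the geometry decode above, used when the matched
 * nand_flash_ids entry has pagesize == 0 and the geometry must come from
 * the 4th ID byte (devcfg). The struct and function names are hypothetical.
 * Example: devcfg = 0x15 decodes to 2048-byte pages, 128 KiB blocks,
 * 64-byte OOB and an x8 bus.
 */
struct example_nand_geometry {
	uint32_t pagesize;
	uint32_t blksize;
	uint32_t oobsize;
	int widebus;
};

static struct example_nand_geometry example_decode_devcfg(uint8_t devcfg)
{
	struct example_nand_geometry g;

	g.pagesize = 1024 << (devcfg & 0x3);		  /* 1K/2K/4K/8K */
	g.blksize = (64 * 1024) << ((devcfg >> 4) & 0x3); /* 64K..512K */
	g.oobsize = (8 << ((devcfg >> 2) & 1)) * (g.pagesize >> 9);
	g.widebus = (devcfg & (1 << 6)) ? 1 : 0;	  /* x16 if set */
	return g;
}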
2169
2170#define BAM_APPS_PIPE_LOCK_GRP 0
2171/*
2172 * This function allocates, configures, and connects an end point, and
2173 * registers event notification for it. It also allocates DMA memory for
2174 * the descriptor FIFO of the pipe.
2175 */
2176static int msm_nand_init_endpoint(struct msm_nand_info *info,
2177 struct msm_nand_sps_endpt *end_point,
2178 uint32_t pipe_index)
2179{
2180 int rc = 0;
2181 struct sps_pipe *pipe_handle;
2182 struct sps_connect *sps_config = &end_point->config;
2183 struct sps_register_event *sps_event = &end_point->event;
2184
2185 pipe_handle = sps_alloc_endpoint();
2186 if (!pipe_handle) {
2187 pr_err("sps_alloc_endpoint() failed\n");
2188 rc = -ENOMEM;
2189 goto out;
2190 }
2191
2192 rc = sps_get_config(pipe_handle, sps_config);
2193 if (rc) {
2194 pr_err("sps_get_config() failed %d\n", rc);
2195 goto free_endpoint;
2196 }
2197
2198 if (pipe_index == SPS_DATA_PROD_PIPE_INDEX) {
2199 /* READ CASE: source - BAM; destination - system memory */
2200 sps_config->source = info->sps.bam_handle;
2201 sps_config->destination = SPS_DEV_HANDLE_MEM;
2202 sps_config->mode = SPS_MODE_SRC;
2203 sps_config->src_pipe_index = pipe_index;
2204 } else if (pipe_index == SPS_DATA_CONS_PIPE_INDEX ||
2205 pipe_index == SPS_CMD_CONS_PIPE_INDEX) {
2206 /* WRITE CASE: source - system memory; destination - BAM */
2207 sps_config->source = SPS_DEV_HANDLE_MEM;
2208 sps_config->destination = info->sps.bam_handle;
2209 sps_config->mode = SPS_MODE_DEST;
2210 sps_config->dest_pipe_index = pipe_index;
2211 }
2212
2213 sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
2214 sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP;
2215 /*
2216	 * The descriptor FIFO is cyclic. If SPS_MAX_DESC_NUM descriptors are
2217	 * allowed to be outstanding before any of them is acknowledged, the
2218	 * FIFO needs one extra slot so that a full ring can be distinguished
2219	 * from an empty one: (SPS_MAX_DESC_NUM + 1) * sizeof(struct sps_iovec).
2220 */
2221 sps_config->desc.size = (SPS_MAX_DESC_NUM + 1) *
2222 sizeof(struct sps_iovec);
2223 sps_config->desc.base = dmam_alloc_coherent(info->nand_chip.dev,
2224 sps_config->desc.size,
2225 &sps_config->desc.phys_base,
2226 GFP_KERNEL);
2227 if (!sps_config->desc.base) {
2228 pr_err("dmam_alloc_coherent() failed for size %x\n",
2229 sps_config->desc.size);
2230 rc = -ENOMEM;
2231 goto free_endpoint;
2232 }
2233 memset(sps_config->desc.base, 0x00, sps_config->desc.size);
2234
2235 rc = sps_connect(pipe_handle, sps_config);
2236 if (rc) {
2237 pr_err("sps_connect() failed %d\n", rc);
2238 goto free_endpoint;
2239 }
2240
2241 init_completion(&end_point->completion);
2242 sps_event->mode = SPS_TRIGGER_WAIT;
2243 sps_event->options = SPS_O_DESC_DONE;
2244 sps_event->xfer_done = &end_point->completion;
2245 sps_event->user = (void *)info;
2246
2247 rc = sps_register_event(pipe_handle, sps_event);
2248 if (rc) {
2249 pr_err("sps_register_event() failed %d\n", rc);
2250 goto sps_disconnect;
2251 }
2252 end_point->handle = pipe_handle;
2253 pr_debug("pipe handle 0x%x for pipe %d\n", (uint32_t)pipe_handle,
2254 pipe_index);
2255 goto out;
2256sps_disconnect:
2257 sps_disconnect(pipe_handle);
2258free_endpoint:
2259 sps_free_endpoint(pipe_handle);
2260out:
2261 return rc;
2262}
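
/*
 * Worked descriptor FIFO sizing (hypothetical helper): struct sps_iovec
 * carries the three words used throughout this driver (addr, size, flags),
 * i.e. 12 bytes, so with SPS_MAX_DESC_NUM = 64 each pipe FIFO above is
 * (64 + 1) * 12 = 780 bytes. The extra slot lets a completely full ring be
 * distinguished from an empty one.
 */
static inline uint32_t example_desc_fifo_bytes(uint32_t max_desc)
{
	return (max_desc + 1) * sizeof(struct sps_iovec);
}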
2263
2264/* This function disconnects and frees an end point */
2265static void msm_nand_deinit_endpoint(struct msm_nand_info *info,
2266 struct msm_nand_sps_endpt *end_point)
2267{
2268 sps_disconnect(end_point->handle);
2269 sps_free_endpoint(end_point->handle);
2270}
2271
2272/*
2273 * This function registers BAM device and initializes its end points for
2274 * the following pipes -
2275 * system consumer pipe for data (pipe#0),
2276 * system producer pipe for data (pipe#1),
2277 * system consumer pipe for commands (pipe#2).
2278 */
2279static int msm_nand_bam_init(struct msm_nand_info *nand_info)
2280{
2281 struct sps_bam_props bam = {0};
2282 int rc = 0;
2283
2284 bam.phys_addr = nand_info->bam_phys;
2285 bam.virt_addr = nand_info->bam_base;
2286 bam.irq = nand_info->bam_irq;
2287 /*
2288 * NAND device is accessible from both Apps and Modem processor and
2289 * thus, NANDc and BAM are shared between both the processors. But BAM
2290 * must be enabled and instantiated only once during boot up by
2291	 * Trustzone before Modem/Apps is brought out of reset.
2292 *
2293 * This is indicated to SPS driver on Apps by marking flag
2294 * SPS_BAM_MGR_DEVICE_REMOTE. The following are the global
2295 * initializations that will be done by Trustzone - Execution
2296 * Environment, Pipes assignment to Apps/Modem, Pipe Super groups and
2297 * Descriptor summing threshold.
2298 *
2299 * NANDc BAM device supports 2 execution environments - Modem and Apps
2300 * and thus the flag SPS_BAM_MGR_MULTI_EE is set.
2301 */
2302 bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
2303
2304	rc = sps_phy2h(bam.phys_addr, &nand_info->sps.bam_handle);
2305	if (!rc)
2306		goto init_sps_ep;
2307	rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle);
2308	if (rc) {
2309		pr_err("%s: sps_register_bam_device() failed with %d\n",
2310			__func__, rc);
2311		goto out;
2312	}
2313	pr_info("%s: BAM device registered: bam_handle 0x%x\n",
2314		__func__, nand_info->sps.bam_handle);
2315init_sps_ep:
2316	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod,
2317		SPS_DATA_PROD_PIPE_INDEX);
2318	if (rc)
2319		goto out;
2320	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons,
2321 SPS_DATA_CONS_PIPE_INDEX);
2322 if (rc)
2323 goto deinit_data_prod;
2324
2325 rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.cmd_pipe,
2326 SPS_CMD_CONS_PIPE_INDEX);
2327 if (rc)
2328 goto deinit_data_cons;
2329 goto out;
2330deinit_data_cons:
2331 msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
2332deinit_data_prod:
2333 msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
2334out:
2335 return rc;
2336}
2337
2338/*
2339 * This function disconnects and frees the end points of all the pipes.
2340 * Since the BAM is a shared resource, it is not deregistered, as its
2341 * handle might still be in use by LCDC.
2342 */
2343static void msm_nand_bam_free(struct msm_nand_info *nand_info)
2344{
2345 msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
2346 msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
2347 msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe);
2348}
2349
2350/* This function enables DMA support for the NANDc in BAM mode. */
2351static int msm_nand_enable_dma(struct msm_nand_info *info)
2352{
2353 struct msm_nand_sps_cmd *sps_cmd;
2354 struct msm_nand_chip *chip = &info->nand_chip;
2355 int ret;
2356
2357 wait_event(chip->dma_wait_queue,
2358 (sps_cmd = msm_nand_get_dma_buffer(chip, sizeof(*sps_cmd))));
2359
2360 msm_nand_prep_ce(sps_cmd, MSM_NAND_CTRL(info), WRITE,
2361 (1 << BAM_MODE_EN), SPS_IOVEC_FLAG_INT);
2362
2363 ret = sps_transfer_one(info->sps.cmd_pipe.handle,
2364 msm_virt_to_dma(chip, &sps_cmd->ce),
2365 sizeof(struct sps_command_element), NULL,
2366 sps_cmd->flags);
2367 if (ret) {
2368 pr_err("Failed to submit command: %d\n", ret);
2369 goto out;
2370 }
2371 wait_for_completion_io(&info->sps.cmd_pipe.completion);
2372out:
2373 msm_nand_release_dma_buffer(chip, sps_cmd, sizeof(*sps_cmd));
2374 return ret;
2375
2376}
2377
2378#ifdef CONFIG_MSM_SMD
2379static int msm_nand_parse_smem_ptable(int *nr_parts)
2380{
2381
2382 uint32_t i, j;
2383 uint32_t len = FLASH_PTABLE_HDR_LEN;
2384 struct flash_partition_entry *pentry;
2385 char *delimiter = ":";
2386
2387 pr_info("Parsing partition table info from SMEM\n");
2388 /* Read only the header portion of ptable */
2389 ptable = *(struct flash_partition_table *)
2390 (smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len));
2391 /* Verify ptable magic */
2392 if (ptable.magic1 != FLASH_PART_MAGIC1 ||
2393 ptable.magic2 != FLASH_PART_MAGIC2) {
2394 pr_err("Partition table magic verification failed\n");
2395 goto out;
2396 }
2397	/* Ensure that the number of partitions does not exceed the max allocated */
2398 if (ptable.numparts > FLASH_PTABLE_MAX_PARTS_V4) {
2399		pr_err("Number of partitions exceeds the max limit\n");
2400 goto out;
2401 }
2402 /* Find out length of partition data based on table version. */
2403 if (ptable.version <= FLASH_PTABLE_V3) {
2404 len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V3 *
2405 sizeof(struct flash_partition_entry);
2406 } else if (ptable.version == FLASH_PTABLE_V4) {
2407 len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V4 *
2408 sizeof(struct flash_partition_entry);
2409 } else {
2410		pr_err("Unknown ptable version (%d)\n", ptable.version);
2411 goto out;
2412 }
2413
2414 *nr_parts = ptable.numparts;
2415 ptable = *(struct flash_partition_table *)
2416 (smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len));
2417 for (i = 0; i < ptable.numparts; i++) {
2418 pentry = &ptable.part_entry[i];
2419		if (pentry->name[0] == '\0')
2420 continue;
2421 /* Convert name to lower case and discard the initial chars */
2422 mtd_part[i].name = pentry->name;
2423 for (j = 0; j < strlen(mtd_part[i].name); j++)
2424 *(mtd_part[i].name + j) =
2425 tolower(*(mtd_part[i].name + j));
2426 strsep(&(mtd_part[i].name), delimiter);
2427 mtd_part[i].offset = pentry->offset;
2428 mtd_part[i].mask_flags = pentry->attr;
2429 mtd_part[i].size = pentry->length;
2430 pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
2431 i, pentry->name, pentry->offset, pentry->length,
2432 pentry->attr);
2433 }
2434 pr_info("SMEM partition table found: ver: %d len: %d\n",
2435 ptable.version, ptable.numparts);
2436 return 0;
2437out:
2438 return -EINVAL;
2439}
2440#else
2441static int msm_nand_parse_smem_ptable(int *nr_parts)
2442{
2443 return -ENODEV;
2444}
2445#endif
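
/*
 * Standalone sketch of the name clean-up in the parsing loop above
 * (hypothetical helper, assuming tolower() from <linux/ctype.h> and
 * strsep() from <linux/string.h>): an SMEM name such as "0:SBL" is
 * lowercased in place, then everything up to and including the first ':'
 * is dropped, yielding "sbl". Unlike the raw strsep() call above, this
 * guards the case of a name with no delimiter.
 */
static char * __maybe_unused example_normalize_part_name(char *name)
{
	char *rest = name;
	int i;

	for (i = 0; name[i]; i++)
		name[i] = tolower(name[i]);
	strsep(&rest, ":");	/* rest now points past "0:", or is NULL */
	return rest ? rest : name;
}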
2446
2447/*
2448 * This function gets called when a device named msm-nand is added in the
2449 * device tree (.dts) file, along with its resources such as the physical
2450 * addresses of NANDc and BAM, and the BAM IRQ.
2451 *
2452 * It also expects the NAND flash partition information, which is parsed
2453 * here from SMEM, so that the partitions can be registered via the MTD
2454 * function mtd_device_parse_register().
2455 *
2456 */
2457static int __devinit msm_nand_probe(struct platform_device *pdev)
2458{
2459 struct msm_nand_info *info;
2460 struct resource *res;
2461	int i, err, nr_parts;
2462
2463 /*
2464 * The partition information can also be passed from kernel command
2465 * line. Also, the MTD core layer supports adding the whole device as
2466 * one MTD device when no partition information is available at all.
2467	 */
2468	info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info),
2469 GFP_KERNEL);
2470 if (!info) {
2471 pr_err("Unable to allocate memory for msm_nand_info\n");
2472 err = -ENOMEM;
2473 goto out;
2474 }
2475	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2476 "nand_phys");
2477 if (!res || !res->start) {
2478 pr_err("NAND phys address range is not provided\n");
2479 err = -ENODEV;
2480 goto out;
2481 }
2482 info->nand_phys = res->start;
2483 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2484 "bam_phys");
2485 if (!res || !res->start) {
2486 pr_err("BAM phys address range is not provided\n");
2487 err = -ENODEV;
2488 goto out;
2489 }
2490 info->bam_phys = res->start;
2491 info->bam_base = devm_ioremap(&pdev->dev, res->start,
2492 resource_size(res));
2493 if (!info->bam_base) {
2494 pr_err("BAM ioremap() failed for addr 0x%x size 0x%x\n",
2495 res->start, resource_size(res));
2496 err = -ENOMEM;
2497 goto out;
2498 }
2499
2500 info->bam_irq = platform_get_irq_byname(pdev, "bam_irq");
2501 if (info->bam_irq < 0) {
2502 pr_err("BAM IRQ is not provided\n");
2503 err = -ENODEV;
2504 goto out;
2505 }
2506
2507 info->mtd.name = dev_name(&pdev->dev);
2508 info->mtd.priv = info;
2509 info->mtd.owner = THIS_MODULE;
2510 info->nand_chip.dev = &pdev->dev;
2511 init_waitqueue_head(&info->nand_chip.dma_wait_queue);
2512 mutex_init(&info->bam_lock);
2513
2514 info->nand_chip.dma_virt_addr =
2515 dmam_alloc_coherent(&pdev->dev, MSM_NAND_DMA_BUFFER_SIZE,
2516 &info->nand_chip.dma_phys_addr, GFP_KERNEL);
2517 if (!info->nand_chip.dma_virt_addr) {
2518 pr_err("No memory for DMA buffer size %x\n",
2519 MSM_NAND_DMA_BUFFER_SIZE);
2520 err = -ENOMEM;
2521 goto out;
2522 }
2523 err = msm_nand_bam_init(info);
2524 if (err) {
2525 pr_err("msm_nand_bam_init() failed %d\n", err);
2526 goto out;
2527 }
2528 err = msm_nand_enable_dma(info);
2529 if (err) {
2530 pr_err("Failed to enable DMA in NANDc\n");
2531 goto free_bam;
2532 }
2533	err = msm_nand_parse_smem_ptable(&nr_parts);
2534 if (err < 0) {
2535 pr_err("Failed to parse partition table in SMEM\n");
2536 goto free_bam;
2537 }
2538	if (msm_nand_scan(&info->mtd)) {
2539 pr_err("No nand device found\n");
2540 err = -ENXIO;
2541 goto free_bam;
2542 }
2543	for (i = 0; i < nr_parts; i++) {
2544 mtd_part[i].offset *= info->mtd.erasesize;
2545 mtd_part[i].size *= info->mtd.erasesize;
2546 }
2547 err = mtd_device_parse_register(&info->mtd, NULL, NULL,
2548 &mtd_part[0], nr_parts);
2549	if (err < 0) {
2550 pr_err("Unable to register MTD partitions %d\n", err);
2551		goto free_bam;
2552 }
2553 dev_set_drvdata(&pdev->dev, info);
2554
2555 pr_info("NANDc phys addr 0x%lx, BAM phys addr 0x%lx, BAM IRQ %d\n",
2556 info->nand_phys, info->bam_phys, info->bam_irq);
2557 pr_info("Allocated DMA buffer at virt_addr 0x%p, phys_addr 0x%x\n",
2558 info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
2559	goto out;
2560free_bam:
2561 msm_nand_bam_free(info);
2562out:
2563 return err;
2564}
2565
2566/*
2567 * Remove functionality that gets called when driver/device msm-nand
2568 * is removed.
2569 */
2570static int __devexit msm_nand_remove(struct platform_device *pdev)
2571{
2572 struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);
2573
2574 dev_set_drvdata(&pdev->dev, NULL);
2575 if (info) {
2576 mtd_device_unregister(&info->mtd);
2577 msm_nand_bam_free(info);
2578 }
2579 return 0;
2580}
2581
2582#define DRIVER_NAME "msm_qpic_nand"
2583static const struct of_device_id msm_nand_match_table[] = {
2584 { .compatible = "qcom,msm-nand", },
2585 {},
2586};
2587static struct platform_driver msm_nand_driver = {
2588 .probe = msm_nand_probe,
2589 .remove = __devexit_p(msm_nand_remove),
2590 .driver = {
2591 .name = DRIVER_NAME,
2592 .of_match_table = msm_nand_match_table,
2593 },
2594};
2595
2596module_platform_driver(msm_nand_driver);
2597
2598MODULE_ALIAS(DRIVER_NAME);
2599MODULE_LICENSE("GPL v2");
2600MODULE_DESCRIPTION("MSM QPIC NAND flash driver");