      1/*
      2 * Copyright (C) 2007 Google, Inc.
      3 * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
      4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/slab.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/mtd/mtd.h>
20#include <linux/mtd/nand.h>
21#include <linux/mtd/partitions.h>
22#include <linux/platform_device.h>
23#include <linux/sched.h>
24#include <linux/dma-mapping.h>
25#include <linux/io.h>
26#include <linux/crc16.h>
27#include <linux/bitrev.h>
28
29#include <asm/dma.h>
30#include <asm/mach/flash.h>
31
32#include <mach/dma.h>
33
34#include "msm_nand.h"
35
36unsigned long msm_nand_phys;
37unsigned long msm_nandc01_phys;
38unsigned long msm_nandc10_phys;
39unsigned long msm_nandc11_phys;
40unsigned long ebi2_register_base;
41uint32_t dual_nand_ctlr_present;
42uint32_t interleave_enable;
43uint32_t enable_bch_ecc;
     44
45#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
46#define MSM_NAND_DMA_BUFFER_SLOTS \
47 (MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
48
49#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
50#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
51#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
52#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
53
54#define ONFI_IDENTIFIER_LENGTH 0x0004
55#define ONFI_PARAM_INFO_LENGTH 0x0200
56#define ONFI_PARAM_PAGE_LENGTH 0x0100
57
58#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
59
60#define FLASH_READ_ONFI_IDENTIFIER_COMMAND 0x90
61#define FLASH_READ_ONFI_IDENTIFIER_ADDRESS 0x20
62#define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC
63#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
64
65#define VERBOSE 0
66
67struct msm_nand_chip {
68 struct device *dev;
69 wait_queue_head_t wait_queue;
70 atomic_t dma_buffer_busy;
71 unsigned dma_channel;
72 uint8_t *dma_buffer;
73 dma_addr_t dma_addr;
74 unsigned CFG0, CFG1, CFG0_RAW, CFG1_RAW;
75 uint32_t ecc_buf_cfg;
76 uint32_t ecc_bch_cfg;
77 uint32_t ecc_parity_bytes;
78 unsigned cw_size;
     79	unsigned int uncorrectable_bit_mask;
     80	unsigned int num_err_mask;
     81};
82
83#define CFG1_WIDE_FLASH (1U << 1)
84
85/* TODO: move datamover code out */
86
87#define SRC_CRCI_NAND_CMD CMD_SRC_CRCI(DMOV_NAND_CRCI_CMD)
88#define DST_CRCI_NAND_CMD CMD_DST_CRCI(DMOV_NAND_CRCI_CMD)
89#define SRC_CRCI_NAND_DATA CMD_SRC_CRCI(DMOV_NAND_CRCI_DATA)
90#define DST_CRCI_NAND_DATA CMD_DST_CRCI(DMOV_NAND_CRCI_DATA)
91
92#define msm_virt_to_dma(chip, vaddr) \
93 ((chip)->dma_addr + \
94 ((uint8_t *)(vaddr) - (chip)->dma_buffer))
95
96/**
97 * msm_nand_oob_64 - oob info for 2KB page
98 */
99static struct nand_ecclayout msm_nand_oob_64 = {
100 .eccbytes = 40,
101 .eccpos = {
102 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
103 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
104 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
105 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
106 },
107 .oobavail = 16,
108 .oobfree = {
109 {30, 16},
110 }
111};
112
113/**
114 * msm_nand_oob_128 - oob info for 4KB page
115 */
116static struct nand_ecclayout msm_nand_oob_128 = {
117 .eccbytes = 80,
118 .eccpos = {
119 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
120 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
121 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
122 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
123 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
124 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
125 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
126 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
127 },
128 .oobavail = 32,
129 .oobfree = {
130 {70, 32},
131 }
132};
133
134/**
135 * msm_nand_oob_224 - oob info for 4KB page 8Bit interface
136 */
137static struct nand_ecclayout msm_nand_oob_224_x8 = {
138 .eccbytes = 104,
139 .eccpos = {
140 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
141 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
142 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
143 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
144 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
145 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
146 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
147 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
148 },
149 .oobavail = 32,
150 .oobfree = {
151 {91, 32},
152 }
153};
154
155/**
156 * msm_nand_oob_224 - oob info for 4KB page 16Bit interface
157 */
158static struct nand_ecclayout msm_nand_oob_224_x16 = {
159 .eccbytes = 112,
160 .eccpos = {
161 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
162 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
163 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
164 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
165 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
166 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
167 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
168 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
169 },
170 .oobavail = 32,
171 .oobfree = {
172 {98, 32},
173 }
174};
175
176/**
177 * msm_nand_oob_256 - oob info for 8KB page
178 */
179static struct nand_ecclayout msm_nand_oob_256 = {
180 .eccbytes = 160,
181 .eccpos = {
182 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
183 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
184 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
185 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
186 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
187 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
188 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
189 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
190 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
    191		 90,  91,  92,  93,  94,  96,  97,  98,  99, 100,
192 101, 102, 103, 104, 105, 106, 107, 108, 109, 110,
193 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
194 121, 122, 123, 124, 125, 126, 127, 128, 129, 130,
195 131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
196 141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
197 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
198 },
199 .oobavail = 64,
200 .oobfree = {
201 {151, 64},
202 }
203};
204
205/**
206 * msm_onenand_oob_64 - oob info for large (2KB) page
207 */
208static struct nand_ecclayout msm_onenand_oob_64 = {
209 .eccbytes = 20,
210 .eccpos = {
211 8, 9, 10, 11, 12,
212 24, 25, 26, 27, 28,
213 40, 41, 42, 43, 44,
214 56, 57, 58, 59, 60,
215 },
216 .oobavail = 20,
217 .oobfree = {
218 {2, 3}, {14, 2}, {18, 3}, {30, 2},
219 {34, 3}, {46, 2}, {50, 3}, {62, 2}
220 }
221};
222
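/*
 * Carve a size-byte region out of the chip's preallocated DMA buffer.
 * Free slots are tracked in the dma_buffer_busy atomic bitmask; NULL is
 * returned when no suitable run of free slots exists, and callers wait
 * on chip->wait_queue and retry.
 */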
223static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
224{
225 unsigned int bitmask, free_bitmask, old_bitmask;
226 unsigned int need_mask, current_need_mask;
227 int free_index;
228
229 need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1;
230 bitmask = atomic_read(&chip->dma_buffer_busy);
231 free_bitmask = ~bitmask;
232 do {
233 free_index = __ffs(free_bitmask);
234 current_need_mask = need_mask << free_index;
235
236 if (size + free_index * MSM_NAND_DMA_BUFFER_SLOTS >=
237 MSM_NAND_DMA_BUFFER_SIZE)
238 return NULL;
239
240 if ((bitmask & current_need_mask) == 0) {
241 old_bitmask =
242 atomic_cmpxchg(&chip->dma_buffer_busy,
243 bitmask,
244 bitmask | current_need_mask);
245 if (old_bitmask == bitmask)
246 return chip->dma_buffer +
247 free_index * MSM_NAND_DMA_BUFFER_SLOTS;
248 free_bitmask = 0; /* force return */
249 }
250 /* current free range was too small, clear all free bits */
251 /* below the top busy bit within current_need_mask */
252 free_bitmask &=
253 ~(~0U >> (32 - fls(bitmask & current_need_mask)));
254 } while (free_bitmask);
255
256 return NULL;
257}
258
259static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
260 void *buffer, size_t size)
261{
262 int index;
263 unsigned int used_mask;
264
265 used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOTS)) - 1;
266 index = ((uint8_t *)buffer - chip->dma_buffer) /
267 MSM_NAND_DMA_BUFFER_SLOTS;
268 atomic_sub(used_mask << index, &chip->dma_buffer_busy);
269
270 wake_up(&chip->wait_queue);
271}
272
273
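/* Read a NAND controller register by running a one-element data-mover
 * command list that copies the register value into the shared DMA buffer. */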
274unsigned flash_rd_reg(struct msm_nand_chip *chip, unsigned addr)
275{
276 struct {
277 dmov_s cmd;
278 unsigned cmdptr;
279 unsigned data;
280 } *dma_buffer;
281 unsigned rv;
282
283 wait_event(chip->wait_queue,
284 (dma_buffer = msm_nand_get_dma_buffer(
285 chip, sizeof(*dma_buffer))));
286
287 dma_buffer->cmd.cmd = CMD_LC | CMD_OCB | CMD_OCU;
288 dma_buffer->cmd.src = addr;
289 dma_buffer->cmd.dst = msm_virt_to_dma(chip, &dma_buffer->data);
290 dma_buffer->cmd.len = 4;
291
292 dma_buffer->cmdptr =
293 (msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP;
294 dma_buffer->data = 0xeeeeeeee;
295
296 mb();
297 msm_dmov_exec_cmd(
    298		chip->dma_channel, DMOV_CMD_PTR_LIST |
    299		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
300 mb();
301
302 rv = dma_buffer->data;
303
304 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
305
306 return rv;
307}
308
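/* Write a NAND controller register via a one-element data-mover command. */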
309void flash_wr_reg(struct msm_nand_chip *chip, unsigned addr, unsigned val)
310{
311 struct {
312 dmov_s cmd;
313 unsigned cmdptr;
314 unsigned data;
315 } *dma_buffer;
316
317 wait_event(chip->wait_queue,
318 (dma_buffer = msm_nand_get_dma_buffer(
319 chip, sizeof(*dma_buffer))));
320
321 dma_buffer->cmd.cmd = CMD_LC | CMD_OCB | CMD_OCU;
322 dma_buffer->cmd.src = msm_virt_to_dma(chip, &dma_buffer->data);
323 dma_buffer->cmd.dst = addr;
324 dma_buffer->cmd.len = 4;
325
326 dma_buffer->cmdptr =
327 (msm_virt_to_dma(chip, &dma_buffer->cmd) >> 3) | CMD_PTR_LP;
328 dma_buffer->data = val;
329
330 mb();
331 msm_dmov_exec_cmd(
    332		chip->dma_channel, DMOV_CMD_PTR_LIST |
    333		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
334 mb();
335
336 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
337}
338
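/* Map a kernel buffer for DMA; handles both lowmem addresses and
 * vmalloc addresses (the latter only within a single page). */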
339static dma_addr_t
340msm_nand_dma_map(struct device *dev, void *addr, size_t size,
341 enum dma_data_direction dir)
342{
343 struct page *page;
344 unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
345 if (virt_addr_valid(addr))
346 page = virt_to_page(addr);
347 else {
348 if (WARN_ON(size + offset > PAGE_SIZE))
349 return ~0;
350 page = vmalloc_to_page(addr);
351 }
352 return dma_map_page(dev, page, offset, size, dir);
353}
354
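/* Issue MSM_NAND_CMD_FETCH_ID through the data mover and return the raw
 * READ_ID word (maker and device codes in the low 16 bits). */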
355uint32_t flash_read_id(struct msm_nand_chip *chip)
356{
357 struct {
358 dmov_s cmd[7];
359 unsigned cmdptr;
360 unsigned data[7];
361 } *dma_buffer;
362 uint32_t rv;
363
364 wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
365 (chip, sizeof(*dma_buffer))));
366
367 dma_buffer->data[0] = 0 | 4;
368 dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
369 dma_buffer->data[2] = 1;
370 dma_buffer->data[3] = 0xeeeeeeee;
371 dma_buffer->data[4] = 0xeeeeeeee;
372 dma_buffer->data[5] = flash_rd_reg(chip, MSM_NAND_SFLASHC_BURST_CFG);
373 dma_buffer->data[6] = 0x00000000;
374 BUILD_BUG_ON(6 != ARRAY_SIZE(dma_buffer->data) - 1);
375
376 dma_buffer->cmd[0].cmd = 0 | CMD_OCB;
377 dma_buffer->cmd[0].src = msm_virt_to_dma(chip, &dma_buffer->data[6]);
378 dma_buffer->cmd[0].dst = MSM_NAND_SFLASHC_BURST_CFG;
379 dma_buffer->cmd[0].len = 4;
380
381 dma_buffer->cmd[1].cmd = 0;
382 dma_buffer->cmd[1].src = msm_virt_to_dma(chip, &dma_buffer->data[0]);
383 dma_buffer->cmd[1].dst = MSM_NAND_FLASH_CHIP_SELECT;
384 dma_buffer->cmd[1].len = 4;
385
386 dma_buffer->cmd[2].cmd = DST_CRCI_NAND_CMD;
387 dma_buffer->cmd[2].src = msm_virt_to_dma(chip, &dma_buffer->data[1]);
388 dma_buffer->cmd[2].dst = MSM_NAND_FLASH_CMD;
389 dma_buffer->cmd[2].len = 4;
390
391 dma_buffer->cmd[3].cmd = 0;
392 dma_buffer->cmd[3].src = msm_virt_to_dma(chip, &dma_buffer->data[2]);
393 dma_buffer->cmd[3].dst = MSM_NAND_EXEC_CMD;
394 dma_buffer->cmd[3].len = 4;
395
396 dma_buffer->cmd[4].cmd = SRC_CRCI_NAND_DATA;
397 dma_buffer->cmd[4].src = MSM_NAND_FLASH_STATUS;
398 dma_buffer->cmd[4].dst = msm_virt_to_dma(chip, &dma_buffer->data[3]);
399 dma_buffer->cmd[4].len = 4;
400
401 dma_buffer->cmd[5].cmd = 0;
402 dma_buffer->cmd[5].src = MSM_NAND_READ_ID;
403 dma_buffer->cmd[5].dst = msm_virt_to_dma(chip, &dma_buffer->data[4]);
404 dma_buffer->cmd[5].len = 4;
405
406 dma_buffer->cmd[6].cmd = CMD_OCU | CMD_LC;
407 dma_buffer->cmd[6].src = msm_virt_to_dma(chip, &dma_buffer->data[5]);
408 dma_buffer->cmd[6].dst = MSM_NAND_SFLASHC_BURST_CFG;
409 dma_buffer->cmd[6].len = 4;
410
411 BUILD_BUG_ON(6 != ARRAY_SIZE(dma_buffer->cmd) - 1);
412
413 dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3
414 ) | CMD_PTR_LP;
415
416 mb();
    417	msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST |
    418		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
419 mb();
420
421 pr_info("status: %x\n", dma_buffer->data[3]);
422 pr_info("nandid: %x maker %02x device %02x\n",
423 dma_buffer->data[4], dma_buffer->data[4] & 0xff,
424 (dma_buffer->data[4] >> 8) & 0xff);
425 rv = dma_buffer->data[4];
426 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
427 return rv;
428}
429
430struct flash_identification {
431 uint32_t flash_id;
432 uint32_t density;
433 uint32_t widebus;
434 uint32_t pagesize;
435 uint32_t blksize;
436 uint32_t oobsize;
437 uint32_t ecc_correctability;
438} supported_flash;
439
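/* ONFI parameter-page CRC-16 (initial value 0x4F4E). The buffer, seed and
 * result are bit-reversed so the kernel's reflected crc16() produces the
 * MSB-first CRC defined by the ONFI specification. */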
440uint16_t flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
441{
442 int i;
443 uint16_t result;
444
445 for (i = 0; i < count; i++)
446 buffer[i] = bitrev8(buffer[i]);
447
448 result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
449
450 for (i = 0; i < count; i++)
451 buffer[i] = bitrev8(buffer[i]);
452
453 return result;
454}
455
456
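/*
 * Probe for an ONFI-compliant device: read the ONFI identifier and then
 * the parameter page in two passes over the same command list, CRC-check
 * the redundant parameter-page copies, and fill in supported_flash from
 * the first copy that passes.
 */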
457uint32_t flash_onfi_probe(struct msm_nand_chip *chip)
458{
459 struct onfi_param_page {
460 uint32_t parameter_page_signature;
461 uint16_t revision_number;
462 uint16_t features_supported;
463 uint16_t optional_commands_supported;
464 uint8_t reserved0[22];
465 uint8_t device_manufacturer[12];
466 uint8_t device_model[20];
467 uint8_t jedec_manufacturer_id;
468 uint16_t date_code;
469 uint8_t reserved1[13];
470 uint32_t number_of_data_bytes_per_page;
471 uint16_t number_of_spare_bytes_per_page;
472 uint32_t number_of_data_bytes_per_partial_page;
473 uint16_t number_of_spare_bytes_per_partial_page;
474 uint32_t number_of_pages_per_block;
475 uint32_t number_of_blocks_per_logical_unit;
476 uint8_t number_of_logical_units;
477 uint8_t number_of_address_cycles;
478 uint8_t number_of_bits_per_cell;
479 uint16_t maximum_bad_blocks_per_logical_unit;
480 uint16_t block_endurance;
481 uint8_t guaranteed_valid_begin_blocks;
482 uint16_t guaranteed_valid_begin_blocks_endurance;
483 uint8_t number_of_programs_per_page;
484 uint8_t partial_program_attributes;
485 uint8_t number_of_bits_ecc_correctability;
486 uint8_t number_of_interleaved_address_bits;
487 uint8_t interleaved_operation_attributes;
488 uint8_t reserved2[13];
489 uint8_t io_pin_capacitance;
490 uint16_t timing_mode_support;
491 uint16_t program_cache_timing_mode_support;
492 uint16_t maximum_page_programming_time;
493 uint16_t maximum_block_erase_time;
494 uint16_t maximum_page_read_time;
495 uint16_t maximum_change_column_setup_time;
496 uint8_t reserved3[23];
497 uint16_t vendor_specific_revision_number;
498 uint8_t vendor_specific[88];
499 uint16_t integrity_crc;
500
501 } __attribute__((__packed__));
502
503 struct onfi_param_page *onfi_param_page_ptr;
504 uint8_t *onfi_identifier_buf = NULL;
505 uint8_t *onfi_param_info_buf = NULL;
506
507 struct {
508 dmov_s cmd[11];
509 unsigned cmdptr;
510 struct {
511 uint32_t cmd;
512 uint32_t addr0;
513 uint32_t addr1;
514 uint32_t cfg0;
515 uint32_t cfg1;
516 uint32_t exec;
517 uint32_t flash_status;
518 uint32_t devcmd1_orig;
519 uint32_t devcmdvld_orig;
520 uint32_t devcmd1_mod;
521 uint32_t devcmdvld_mod;
522 uint32_t sflash_bcfg_orig;
523 uint32_t sflash_bcfg_mod;
524 } data;
525 } *dma_buffer;
526 dmov_s *cmd;
527
528 unsigned page_address = 0;
529 int err = 0;
530 dma_addr_t dma_addr_param_info = 0;
531 dma_addr_t dma_addr_identifier = 0;
532 unsigned cmd_set_count = 2;
533 unsigned crc_chk_count = 0;
534
535 if (msm_nand_data.nr_parts) {
536 page_address = ((msm_nand_data.parts[0]).offset << 6);
537 } else {
538 pr_err("flash_onfi_probe: "
539 "No partition info available\n");
540 err = -EIO;
541 return err;
542 }
543
544 wait_event(chip->wait_queue, (onfi_identifier_buf =
545 msm_nand_get_dma_buffer(chip, ONFI_IDENTIFIER_LENGTH)));
546 dma_addr_identifier = msm_virt_to_dma(chip, onfi_identifier_buf);
547
548 wait_event(chip->wait_queue, (onfi_param_info_buf =
549 msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
550 dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);
551
552 wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
553 (chip, sizeof(*dma_buffer))));
554
555 dma_buffer->data.sflash_bcfg_orig = flash_rd_reg
556 (chip, MSM_NAND_SFLASHC_BURST_CFG);
557 dma_buffer->data.devcmd1_orig = flash_rd_reg(chip, MSM_NAND_DEV_CMD1);
558 dma_buffer->data.devcmdvld_orig = flash_rd_reg(chip,
559 MSM_NAND_DEV_CMD_VLD);
560
561 while (cmd_set_count-- > 0) {
562 cmd = dma_buffer->cmd;
563
564 dma_buffer->data.devcmd1_mod = (dma_buffer->data.devcmd1_orig &
565 0xFFFFFF00) | (cmd_set_count
566 ? FLASH_READ_ONFI_IDENTIFIER_COMMAND
567 : FLASH_READ_ONFI_PARAMETERS_COMMAND);
568 dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
569 dma_buffer->data.addr0 = (page_address << 16) | (cmd_set_count
570 ? FLASH_READ_ONFI_IDENTIFIER_ADDRESS
571 : FLASH_READ_ONFI_PARAMETERS_ADDRESS);
572 dma_buffer->data.addr1 = (page_address >> 16) & 0xFF;
573 dma_buffer->data.cfg0 = (cmd_set_count
574 ? MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER
575 : MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO);
576 dma_buffer->data.cfg1 = (cmd_set_count
577 ? MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER
578 : MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO);
579 dma_buffer->data.sflash_bcfg_mod = 0x00000000;
580 dma_buffer->data.devcmdvld_mod = (dma_buffer->
581 data.devcmdvld_orig & 0xFFFFFFFE);
582 dma_buffer->data.exec = 1;
583 dma_buffer->data.flash_status = 0xeeeeeeee;
584
585 /* Put the Nand ctlr in Async mode and disable SFlash ctlr */
586 cmd->cmd = 0;
587 cmd->src = msm_virt_to_dma(chip,
588 &dma_buffer->data.sflash_bcfg_mod);
589 cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
590 cmd->len = 4;
591 cmd++;
592
593 /* Block on cmd ready, & write CMD,ADDR0,ADDR1,CHIPSEL regs */
594 cmd->cmd = DST_CRCI_NAND_CMD;
595 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
596 cmd->dst = MSM_NAND_FLASH_CMD;
597 cmd->len = 12;
598 cmd++;
599
600 /* Configure the CFG0 and CFG1 registers */
601 cmd->cmd = 0;
602 cmd->src = msm_virt_to_dma(chip,
603 &dma_buffer->data.cfg0);
604 cmd->dst = MSM_NAND_DEV0_CFG0;
605 cmd->len = 8;
606 cmd++;
607
608 /* Configure the DEV_CMD_VLD register */
609 cmd->cmd = 0;
610 cmd->src = msm_virt_to_dma(chip,
611 &dma_buffer->data.devcmdvld_mod);
612 cmd->dst = MSM_NAND_DEV_CMD_VLD;
613 cmd->len = 4;
614 cmd++;
615
616 /* Configure the DEV_CMD1 register */
617 cmd->cmd = 0;
618 cmd->src = msm_virt_to_dma(chip,
619 &dma_buffer->data.devcmd1_mod);
620 cmd->dst = MSM_NAND_DEV_CMD1;
621 cmd->len = 4;
622 cmd++;
623
624 /* Kick the execute command */
625 cmd->cmd = 0;
626 cmd->src = msm_virt_to_dma(chip,
627 &dma_buffer->data.exec);
628 cmd->dst = MSM_NAND_EXEC_CMD;
629 cmd->len = 4;
630 cmd++;
631
632 /* Block on data ready, and read the two status registers */
633 cmd->cmd = SRC_CRCI_NAND_DATA;
634 cmd->src = MSM_NAND_FLASH_STATUS;
635 cmd->dst = msm_virt_to_dma(chip,
636 &dma_buffer->data.flash_status);
637 cmd->len = 4;
638 cmd++;
639
640 /* Read data block - valid only if status says success */
641 cmd->cmd = 0;
642 cmd->src = MSM_NAND_FLASH_BUFFER;
643 cmd->dst = (cmd_set_count ? dma_addr_identifier :
644 dma_addr_param_info);
645 cmd->len = (cmd_set_count ? ONFI_IDENTIFIER_LENGTH :
646 ONFI_PARAM_INFO_LENGTH);
647 cmd++;
648
649 /* Restore the DEV_CMD1 register */
    650		cmd->cmd = 0;
651 cmd->src = msm_virt_to_dma(chip,
652 &dma_buffer->data.devcmd1_orig);
653 cmd->dst = MSM_NAND_DEV_CMD1;
654 cmd->len = 4;
655 cmd++;
656
657 /* Restore the DEV_CMD_VLD register */
658 cmd->cmd = 0;
659 cmd->src = msm_virt_to_dma(chip,
660 &dma_buffer->data.devcmdvld_orig);
661 cmd->dst = MSM_NAND_DEV_CMD_VLD;
662 cmd->len = 4;
663 cmd++;
664
665 /* Restore the SFLASH_BURST_CONFIG register */
666 cmd->cmd = 0;
667 cmd->src = msm_virt_to_dma(chip,
668 &dma_buffer->data.sflash_bcfg_orig);
669 cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
670 cmd->len = 4;
671 cmd++;
672
673 BUILD_BUG_ON(11 != ARRAY_SIZE(dma_buffer->cmd));
674 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
675 dma_buffer->cmd[0].cmd |= CMD_OCB;
676 cmd[-1].cmd |= CMD_OCU | CMD_LC;
677
678 dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
679 >> 3) | CMD_PTR_LP;
680
681 mb();
    682		msm_dmov_exec_cmd(chip->dma_channel,
    683			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
684 &dma_buffer->cmdptr)));
685 mb();
686
687 /* Check for errors, protection violations etc */
688 if (dma_buffer->data.flash_status & 0x110) {
689 pr_info("MPU/OP error (0x%x) during "
690 "ONFI probe\n",
691 dma_buffer->data.flash_status);
692 err = -EIO;
693 break;
694 }
695
696 if (cmd_set_count) {
697 onfi_param_page_ptr = (struct onfi_param_page *)
698 (&(onfi_identifier_buf[0]));
699 if (onfi_param_page_ptr->parameter_page_signature !=
700 ONFI_PARAMETER_PAGE_SIGNATURE) {
701 pr_info("ONFI probe : Found a non"
702 "ONFI Compliant device \n");
703 err = -EIO;
704 break;
705 }
706 } else {
707 for (crc_chk_count = 0; crc_chk_count <
708 ONFI_PARAM_INFO_LENGTH
709 / ONFI_PARAM_PAGE_LENGTH;
710 crc_chk_count++) {
711 onfi_param_page_ptr =
712 (struct onfi_param_page *)
713 (&(onfi_param_info_buf
714 [ONFI_PARAM_PAGE_LENGTH *
715 crc_chk_count]));
716 if (flash_onfi_crc_check(
717 (uint8_t *)onfi_param_page_ptr,
718 ONFI_PARAM_PAGE_LENGTH - 2) ==
719 onfi_param_page_ptr->integrity_crc) {
720 break;
721 }
722 }
723 if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
724 / ONFI_PARAM_PAGE_LENGTH) {
725 pr_info("ONFI probe : CRC Check "
726 "failed on ONFI Parameter "
727 "data \n");
728 err = -EIO;
729 break;
730 } else {
731 supported_flash.flash_id =
732 flash_read_id(chip);
733 supported_flash.widebus =
734 onfi_param_page_ptr->
735 features_supported & 0x01;
736 supported_flash.pagesize =
737 onfi_param_page_ptr->
738 number_of_data_bytes_per_page;
739 supported_flash.blksize =
740 onfi_param_page_ptr->
741 number_of_pages_per_block *
742 supported_flash.pagesize;
743 supported_flash.oobsize =
744 onfi_param_page_ptr->
745 number_of_spare_bytes_per_page;
746 supported_flash.density =
747 onfi_param_page_ptr->
748 number_of_blocks_per_logical_unit
749 * supported_flash.blksize;
750 supported_flash.ecc_correctability =
751 onfi_param_page_ptr->
752 number_of_bits_ecc_correctability;
753
754 pr_info("ONFI probe : Found an ONFI "
755 "compliant device %s\n",
756 onfi_param_page_ptr->device_model);
757
758 /* Temporary hack for MT29F4G08ABC device.
759 * Since the device is not properly adhering
760 * to ONFi specification it is reporting
761 * as 16 bit device though it is 8 bit device!!!
762 */
763 if (!strncmp(onfi_param_page_ptr->device_model,
764 "MT29F4G08ABC", 12))
765 supported_flash.widebus = 0;
766 }
767 }
768 }
769
770 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
771 msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
772 ONFI_PARAM_INFO_LENGTH);
773 msm_nand_release_dma_buffer(chip, onfi_identifier_buf,
774 ONFI_IDENTIFIER_LENGTH);
775
776 return err;
777}
778
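/*
 * Read page data and/or OOB into the caller's buffers. For each page a
 * data-mover command list is built that fetches every codeword and its
 * flash/buffer status; the status words are then inspected to tell apart
 * erased pages, correctable ECC errors and uncorrectable errors.
 */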
779static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
780 struct mtd_oob_ops *ops)
781{
782 struct msm_nand_chip *chip = mtd->priv;
783
784 struct {
785 dmov_s cmd[8 * 5 + 2];
786 unsigned cmdptr;
787 struct {
788 uint32_t cmd;
789 uint32_t addr0;
790 uint32_t addr1;
791 uint32_t chipsel;
792 uint32_t cfg0;
793 uint32_t cfg1;
794 uint32_t eccbchcfg;
795 uint32_t exec;
796 uint32_t ecccfg;
797 struct {
798 uint32_t flash_status;
799 uint32_t buffer_status;
800 } result[8];
801 } data;
802 } *dma_buffer;
803 dmov_s *cmd;
804 unsigned n;
805 unsigned page = 0;
806 uint32_t oob_len;
807 uint32_t sectordatasize;
808 uint32_t sectoroobsize;
809 int err, pageerr, rawerr;
810 dma_addr_t data_dma_addr = 0;
811 dma_addr_t oob_dma_addr = 0;
812 dma_addr_t data_dma_addr_curr = 0;
813 dma_addr_t oob_dma_addr_curr = 0;
814 uint32_t oob_col = 0;
815 unsigned page_count;
816 unsigned pages_read = 0;
817 unsigned start_sector = 0;
818 uint32_t ecc_errors;
819 uint32_t total_ecc_errors = 0;
820 unsigned cwperpage;
821#if VERBOSE
822 pr_info("================================================="
823 "================\n");
824 pr_info("%s:\nfrom 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
825 "\noobbuf 0x%p ooblen 0x%x\n",
826 __func__, from, ops->mode, ops->datbuf, ops->len,
827 ops->oobbuf, ops->ooblen);
828#endif
829
830 if (mtd->writesize == 2048)
831 page = from >> 11;
832
833 if (mtd->writesize == 4096)
834 page = from >> 12;
835
836 oob_len = ops->ooblen;
837 cwperpage = (mtd->writesize >> 9);
838
839 if (from & (mtd->writesize - 1)) {
840 pr_err("%s: unsupported from, 0x%llx\n",
841 __func__, from);
842 return -EINVAL;
843 }
    844	if (ops->mode != MTD_OPS_RAW) {
    845		if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
846 /* when ops->datbuf is NULL, ops->len can be ooblen */
847 pr_err("%s: unsupported ops->len, %d\n",
848 __func__, ops->len);
849 return -EINVAL;
850 }
851 } else {
852 if (ops->datbuf != NULL &&
853 (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
854 pr_err("%s: unsupported ops->len,"
    855				" %d for MTD_OPS_RAW\n", __func__, ops->len);
    856			return -EINVAL;
857 }
858 }
859
    860	if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
    861		pr_err("%s: unsupported ops->ooboffs, %d\n",
862 __func__, ops->ooboffs);
863 return -EINVAL;
864 }
865
    866	if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OPS_AUTO_OOB)
    867		start_sector = cwperpage - 1;
868
869 if (ops->oobbuf && !ops->datbuf) {
    870		page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
    871			mtd->oobavail : mtd->oobsize);
872 if ((page_count == 0) && (ops->ooblen))
873 page_count = 1;
    874	} else if (ops->mode != MTD_OPS_RAW)
    875		page_count = ops->len / mtd->writesize;
876 else
877 page_count = ops->len / (mtd->writesize + mtd->oobsize);
878
879 if (ops->datbuf) {
880 data_dma_addr_curr = data_dma_addr =
881 msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
882 DMA_FROM_DEVICE);
883 if (dma_mapping_error(chip->dev, data_dma_addr)) {
884 pr_err("msm_nand_read_oob: failed to get dma addr "
885 "for %p\n", ops->datbuf);
886 return -EIO;
887 }
888 }
889 if (ops->oobbuf) {
890 memset(ops->oobbuf, 0xff, ops->ooblen);
891 oob_dma_addr_curr = oob_dma_addr =
892 msm_nand_dma_map(chip->dev, ops->oobbuf,
893 ops->ooblen, DMA_BIDIRECTIONAL);
894 if (dma_mapping_error(chip->dev, oob_dma_addr)) {
895 pr_err("msm_nand_read_oob: failed to get dma addr "
896 "for %p\n", ops->oobbuf);
897 err = -EIO;
898 goto err_dma_map_oobbuf_failed;
899 }
900 }
901
902 wait_event(chip->wait_queue,
903 (dma_buffer = msm_nand_get_dma_buffer(
904 chip, sizeof(*dma_buffer))));
905
906 oob_col = start_sector * chip->cw_size;
907 if (chip->CFG1 & CFG1_WIDE_FLASH)
908 oob_col >>= 1;
909
910 err = 0;
911 while (page_count-- > 0) {
912 cmd = dma_buffer->cmd;
913
914 /* CMD / ADDR0 / ADDR1 / CHIPSEL program values */
    915		if (ops->mode != MTD_OPS_RAW) {
    916			dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC;
917 dma_buffer->data.cfg0 =
918 (chip->CFG0 & ~(7U << 6))
919 | (((cwperpage-1) - start_sector) << 6);
920 dma_buffer->data.cfg1 = chip->CFG1;
921 if (enable_bch_ecc)
922 dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
923 } else {
924 dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
925 dma_buffer->data.cfg0 = (chip->CFG0_RAW
926 & ~(7U << 6)) | ((cwperpage-1) << 6);
927 dma_buffer->data.cfg1 = chip->CFG1_RAW |
928 (chip->CFG1 & CFG1_WIDE_FLASH);
929 }
930
931 dma_buffer->data.addr0 = (page << 16) | oob_col;
932 dma_buffer->data.addr1 = (page >> 16) & 0xff;
933 /* chipsel_0 + enable DM interface */
934 dma_buffer->data.chipsel = 0 | 4;
935
936
937 /* GO bit for the EXEC register */
938 dma_buffer->data.exec = 1;
939
940
941 BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.result));
942
943 for (n = start_sector; n < cwperpage; n++) {
944 /* flash + buffer status return words */
945 dma_buffer->data.result[n].flash_status = 0xeeeeeeee;
946 dma_buffer->data.result[n].buffer_status = 0xeeeeeeee;
947
948 /* block on cmd ready, then
949 * write CMD / ADDR0 / ADDR1 / CHIPSEL
950 * regs in a burst
951 */
952 cmd->cmd = DST_CRCI_NAND_CMD;
953 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
954 cmd->dst = MSM_NAND_FLASH_CMD;
955 if (n == start_sector)
956 cmd->len = 16;
957 else
958 cmd->len = 4;
959 cmd++;
960
961 if (n == start_sector) {
962 cmd->cmd = 0;
963 cmd->src = msm_virt_to_dma(chip,
964 &dma_buffer->data.cfg0);
965 cmd->dst = MSM_NAND_DEV0_CFG0;
966 if (enable_bch_ecc)
967 cmd->len = 12;
968 else
969 cmd->len = 8;
970 cmd++;
971
972 dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
973 cmd->cmd = 0;
974 cmd->src = msm_virt_to_dma(chip,
975 &dma_buffer->data.ecccfg);
976 cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG;
977 cmd->len = 4;
978 cmd++;
979 }
980
981 /* kick the execute register */
982 cmd->cmd = 0;
983 cmd->src =
984 msm_virt_to_dma(chip, &dma_buffer->data.exec);
985 cmd->dst = MSM_NAND_EXEC_CMD;
986 cmd->len = 4;
987 cmd++;
988
989 /* block on data ready, then
990 * read the status register
991 */
992 cmd->cmd = SRC_CRCI_NAND_DATA;
993 cmd->src = MSM_NAND_FLASH_STATUS;
994 cmd->dst = msm_virt_to_dma(chip,
995 &dma_buffer->data.result[n]);
996 /* MSM_NAND_FLASH_STATUS + MSM_NAND_BUFFER_STATUS */
997 cmd->len = 8;
998 cmd++;
999
1000 /* read data block
1001 * (only valid if status says success)
1002 */
1003 if (ops->datbuf) {
   1004				if (ops->mode != MTD_OPS_RAW)
   1005					sectordatasize = (n < (cwperpage - 1))
1006 ? 516 : (512 - ((cwperpage - 1) << 2));
1007 else
1008 sectordatasize = chip->cw_size;
1009
1010 cmd->cmd = 0;
1011 cmd->src = MSM_NAND_FLASH_BUFFER;
1012 cmd->dst = data_dma_addr_curr;
1013 data_dma_addr_curr += sectordatasize;
1014 cmd->len = sectordatasize;
1015 cmd++;
1016 }
1017
1018 if (ops->oobbuf && (n == (cwperpage - 1)
   1019				|| ops->mode != MTD_OPS_AUTO_OOB)) {
   1020				cmd->cmd = 0;
1021 if (n == (cwperpage - 1)) {
1022 cmd->src = MSM_NAND_FLASH_BUFFER +
1023 (512 - ((cwperpage - 1) << 2));
1024 sectoroobsize = (cwperpage << 2);
   1025					if (ops->mode != MTD_OPS_AUTO_OOB)
   1026						sectoroobsize +=
1027 chip->ecc_parity_bytes;
1028 } else {
1029 cmd->src = MSM_NAND_FLASH_BUFFER + 516;
1030 sectoroobsize = chip->ecc_parity_bytes;
1031 }
1032
1033 cmd->dst = oob_dma_addr_curr;
1034 if (sectoroobsize < oob_len)
1035 cmd->len = sectoroobsize;
1036 else
1037 cmd->len = oob_len;
1038 oob_dma_addr_curr += cmd->len;
1039 oob_len -= cmd->len;
1040 if (cmd->len > 0)
1041 cmd++;
1042 }
1043 }
1044
1045 BUILD_BUG_ON(8 * 5 + 2 != ARRAY_SIZE(dma_buffer->cmd));
1046 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
1047 dma_buffer->cmd[0].cmd |= CMD_OCB;
1048 cmd[-1].cmd |= CMD_OCU | CMD_LC;
1049
1050 dma_buffer->cmdptr =
1051 (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
1052 | CMD_PTR_LP;
1053
1054 mb();
   1055		msm_dmov_exec_cmd(chip->dma_channel,
   1056			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
1057 &dma_buffer->cmdptr)));
1058 mb();
1059
1060 /* if any of the writes failed (0x10), or there
1061 * was a protection violation (0x100), we lose
1062 */
1063 pageerr = rawerr = 0;
1064 for (n = start_sector; n < cwperpage; n++) {
1065 if (dma_buffer->data.result[n].flash_status & 0x110) {
1066 rawerr = -EIO;
1067 break;
1068 }
1069 }
1070 if (rawerr) {
   1071			if (ops->datbuf && ops->mode != MTD_OPS_RAW) {
   1072				uint8_t *datbuf = ops->datbuf +
1073 pages_read * mtd->writesize;
1074
1075 dma_sync_single_for_cpu(chip->dev,
1076 data_dma_addr_curr-mtd->writesize,
1077 mtd->writesize, DMA_BIDIRECTIONAL);
1078
1079 for (n = 0; n < mtd->writesize; n++) {
1080 /* empty blocks read 0x54 at
1081 * these offsets
1082 */
1083 if ((n % 516 == 3 || n % 516 == 175)
1084 && datbuf[n] == 0x54)
1085 datbuf[n] = 0xff;
1086 if (datbuf[n] != 0xff) {
1087 pageerr = rawerr;
1088 break;
1089 }
1090 }
1091
1092 dma_sync_single_for_device(chip->dev,
1093 data_dma_addr_curr-mtd->writesize,
1094 mtd->writesize, DMA_BIDIRECTIONAL);
1095
1096 }
1097 if (ops->oobbuf) {
1098 dma_sync_single_for_cpu(chip->dev,
1099 oob_dma_addr_curr - (ops->ooblen - oob_len),
1100 ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
1101
1102 for (n = 0; n < ops->ooblen; n++) {
1103 if (ops->oobbuf[n] != 0xff) {
1104 pageerr = rawerr;
1105 break;
1106 }
1107 }
1108
1109 dma_sync_single_for_device(chip->dev,
1110 oob_dma_addr_curr - (ops->ooblen - oob_len),
1111 ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
1112 }
1113 }
1114 if (pageerr) {
1115 for (n = start_sector; n < cwperpage; n++) {
   1116				if (dma_buffer->data.result[n].buffer_status &
   1117						chip->uncorrectable_bit_mask) {
   1118					/* not thread safe */
1119 mtd->ecc_stats.failed++;
1120 pageerr = -EBADMSG;
1121 break;
1122 }
1123 }
1124 }
   1125		if (!rawerr) { /* check for correctable errors */
   1126			for (n = start_sector; n < cwperpage; n++) {
   1127				ecc_errors =
   1128				(dma_buffer->data.result[n].buffer_status
   1129				 & chip->num_err_mask);
   1130				if (ecc_errors) {
1131 total_ecc_errors += ecc_errors;
1132 /* not thread safe */
1133 mtd->ecc_stats.corrected += ecc_errors;
1134 if (ecc_errors > 1)
1135 pageerr = -EUCLEAN;
1136 }
1137 }
1138 }
1139 if (pageerr && (pageerr != -EUCLEAN || err == 0))
1140 err = pageerr;
1141
1142#if VERBOSE
1143 if (rawerr && !pageerr) {
1144 pr_err("msm_nand_read_oob %llx %x %x empty page\n",
1145 (loff_t)page * mtd->writesize, ops->len,
1146 ops->ooblen);
1147 } else {
1148 for (n = start_sector; n < cwperpage; n++)
   1149				pr_info("flash_status[%d] = %x, "
   1150					"buffer_status[%d] = %x\n",
   1151					n, dma_buffer->data.result[n].flash_status,
   1152					n, dma_buffer->data.result[n].buffer_status);
1153 }
1154#endif
1155 if (err && err != -EUCLEAN && err != -EBADMSG)
1156 break;
1157 pages_read++;
1158 page++;
1159 }
1160 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
1161
1162 if (ops->oobbuf) {
1163 dma_unmap_page(chip->dev, oob_dma_addr,
1164 ops->ooblen, DMA_FROM_DEVICE);
1165 }
1166err_dma_map_oobbuf_failed:
1167 if (ops->datbuf) {
1168 dma_unmap_page(chip->dev, data_dma_addr,
1169 ops->len, DMA_BIDIRECTIONAL);
1170 }
1171
   1172	if (ops->mode != MTD_OPS_RAW)
   1173		ops->retlen = mtd->writesize * pages_read;
1174 else
1175 ops->retlen = (mtd->writesize + mtd->oobsize) *
1176 pages_read;
1177 ops->oobretlen = ops->ooblen - oob_len;
1178 if (err)
1179 pr_err("msm_nand_read_oob %llx %x %x failed %d, corrected %d\n",
1180 from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
1181 total_ecc_errors);
1182#if VERBOSE
1183 pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
1184 __func__, err, ops->retlen, ops->oobretlen);
1185
1186 pr_info("==================================================="
1187 "==============\n");
1188#endif
1189 return err;
1190}
1191
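/*
 * Dual NAND controller variant of msm_nand_read_oob(): codewords are
 * fetched alternately through NC01 and NC10 (and through both chip
 * selects when interleaving is enabled), with the EBI2 ADM mux switched
 * between the two controllers as each codeword completes.
 */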
1192static int msm_nand_read_oob_dualnandc(struct mtd_info *mtd, loff_t from,
1193 struct mtd_oob_ops *ops)
1194{
1195 struct msm_nand_chip *chip = mtd->priv;
1196
1197 struct {
1198 dmov_s cmd[16 * 6 + 20];
1199 unsigned cmdptr;
1200 struct {
1201 uint32_t cmd;
1202 uint32_t nandc01_addr0;
1203 uint32_t nandc10_addr0;
1204 uint32_t nandc11_addr1;
1205 uint32_t chipsel_cs0;
1206 uint32_t chipsel_cs1;
1207 uint32_t cfg0;
1208 uint32_t cfg1;
1209 uint32_t eccbchcfg;
1210 uint32_t exec;
1211 uint32_t ecccfg;
1212 uint32_t ebi2_chip_select_cfg0;
1213 uint32_t adm_mux_data_ack_req_nc01;
1214 uint32_t adm_mux_cmd_ack_req_nc01;
1215 uint32_t adm_mux_data_ack_req_nc10;
1216 uint32_t adm_mux_cmd_ack_req_nc10;
1217 uint32_t adm_default_mux;
1218 uint32_t default_ebi2_chip_select_cfg0;
1219 uint32_t nc10_flash_dev_cmd_vld;
1220 uint32_t nc10_flash_dev_cmd1;
1221 uint32_t nc10_flash_dev_cmd_vld_default;
1222 uint32_t nc10_flash_dev_cmd1_default;
1223 struct {
1224 uint32_t flash_status;
1225 uint32_t buffer_status;
1226 } result[16];
1227 } data;
1228 } *dma_buffer;
1229 dmov_s *cmd;
1230 unsigned n;
1231 unsigned page = 0;
1232 uint32_t oob_len;
1233 uint32_t sectordatasize;
1234 uint32_t sectoroobsize;
1235 int err, pageerr, rawerr;
1236 dma_addr_t data_dma_addr = 0;
1237 dma_addr_t oob_dma_addr = 0;
1238 dma_addr_t data_dma_addr_curr = 0;
1239 dma_addr_t oob_dma_addr_curr = 0;
1240 uint32_t oob_col = 0;
1241 unsigned page_count;
1242 unsigned pages_read = 0;
1243 unsigned start_sector = 0;
1244 uint32_t ecc_errors;
1245 uint32_t total_ecc_errors = 0;
1246 unsigned cwperpage;
1247 unsigned cw_offset = chip->cw_size;
1248#if VERBOSE
1249 pr_info("================================================="
1250 "============\n");
1251 pr_info("%s:\nfrom 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
1252 "\noobbuf 0x%p ooblen 0x%x\n\n",
1253 __func__, from, ops->mode, ops->datbuf,
1254 ops->len, ops->oobbuf, ops->ooblen);
1255#endif
1256
1257 if (mtd->writesize == 2048)
1258 page = from >> 11;
1259
1260 if (mtd->writesize == 4096)
1261 page = from >> 12;
1262
1263 if (interleave_enable)
1264 page = (from >> 1) >> 12;
1265
1266 oob_len = ops->ooblen;
1267 cwperpage = (mtd->writesize >> 9);
1268
1269 if (from & (mtd->writesize - 1)) {
1270 pr_err("%s: unsupported from, 0x%llx\n",
1271 __func__, from);
1272 return -EINVAL;
1273 }
   1274	if (ops->mode != MTD_OPS_RAW) {
   1275		if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
1276 pr_err("%s: unsupported ops->len, %d\n",
1277 __func__, ops->len);
1278 return -EINVAL;
1279 }
1280 } else {
1281 if (ops->datbuf != NULL &&
1282 (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
1283 pr_err("%s: unsupported ops->len,"
   1284				" %d for MTD_OPS_RAW\n", __func__, ops->len);
   1285			return -EINVAL;
1286 }
1287 }
1288
   1289	if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
   1290		pr_err("%s: unsupported ops->ooboffs, %d\n",
1291 __func__, ops->ooboffs);
1292 return -EINVAL;
1293 }
1294
   1295	if (ops->oobbuf && !ops->datbuf && ops->mode == MTD_OPS_AUTO_OOB)
   1296		start_sector = cwperpage - 1;
1297
1298 if (ops->oobbuf && !ops->datbuf) {
   1299		page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
   1300			mtd->oobavail : mtd->oobsize);
1301 if ((page_count == 0) && (ops->ooblen))
1302 page_count = 1;
   1303	} else if (ops->mode != MTD_OPS_RAW)
   1304		page_count = ops->len / mtd->writesize;
1305 else
1306 page_count = ops->len / (mtd->writesize + mtd->oobsize);
1307
1308 if (ops->datbuf) {
1309 data_dma_addr_curr = data_dma_addr =
1310 msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
1311 DMA_FROM_DEVICE);
1312 if (dma_mapping_error(chip->dev, data_dma_addr)) {
1313 pr_err("msm_nand_read_oob_dualnandc: "
1314 "failed to get dma addr for %p\n",
1315 ops->datbuf);
1316 return -EIO;
1317 }
1318 }
1319 if (ops->oobbuf) {
1320 memset(ops->oobbuf, 0xff, ops->ooblen);
1321 oob_dma_addr_curr = oob_dma_addr =
1322 msm_nand_dma_map(chip->dev, ops->oobbuf,
1323 ops->ooblen, DMA_BIDIRECTIONAL);
1324 if (dma_mapping_error(chip->dev, oob_dma_addr)) {
1325 pr_err("msm_nand_read_oob_dualnandc: "
1326 "failed to get dma addr for %p\n",
1327 ops->oobbuf);
1328 err = -EIO;
1329 goto err_dma_map_oobbuf_failed;
1330 }
1331 }
1332
1333 wait_event(chip->wait_queue,
1334 (dma_buffer = msm_nand_get_dma_buffer(
1335 chip, sizeof(*dma_buffer))));
1336
1337 oob_col = start_sector * chip->cw_size;
1338 if (chip->CFG1 & CFG1_WIDE_FLASH) {
1339 oob_col >>= 1;
1340 cw_offset >>= 1;
1341 }
1342
1343 err = 0;
1344 while (page_count-- > 0) {
1345 cmd = dma_buffer->cmd;
1346
   1347		if (ops->mode != MTD_OPS_RAW) {
   1348			dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ_ECC;
1349 if (start_sector == (cwperpage - 1)) {
1350 dma_buffer->data.cfg0 = (chip->CFG0 &
1351 ~(7U << 6));
1352 } else {
1353 dma_buffer->data.cfg0 = (chip->CFG0 &
1354 ~(7U << 6))
1355 | (((cwperpage >> 1)-1) << 6);
1356 }
1357 dma_buffer->data.cfg1 = chip->CFG1;
1358 if (enable_bch_ecc)
1359 dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
1360 } else {
1361 dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
1362 dma_buffer->data.cfg0 = ((chip->CFG0_RAW &
1363 ~(7U << 6)) | ((((cwperpage >> 1)-1) << 6)));
1364 dma_buffer->data.cfg1 = chip->CFG1_RAW |
1365 (chip->CFG1 & CFG1_WIDE_FLASH);
1366 }
1367
1368 if (!interleave_enable) {
1369 if (start_sector == (cwperpage - 1)) {
1370 dma_buffer->data.nandc10_addr0 =
1371 (page << 16) | oob_col;
1372 dma_buffer->data.nc10_flash_dev_cmd_vld = 0xD;
1373 dma_buffer->data.nc10_flash_dev_cmd1 =
1374 0xF00F3000;
1375 } else {
1376 dma_buffer->data.nandc01_addr0 = page << 16;
1377 /* NC10 ADDR0 points to the next code word */
1378 dma_buffer->data.nandc10_addr0 = (page << 16) |
1379 cw_offset;
1380 dma_buffer->data.nc10_flash_dev_cmd_vld = 0x1D;
1381 dma_buffer->data.nc10_flash_dev_cmd1 =
1382 0xF00FE005;
1383 }
1384 } else {
1385 dma_buffer->data.nandc01_addr0 =
1386 dma_buffer->data.nandc10_addr0 =
1387 (page << 16) | oob_col;
1388 }
1389 /* ADDR1 */
1390 dma_buffer->data.nandc11_addr1 = (page >> 16) & 0xff;
1391
1392 dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
1393 dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
1394 dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
1395 dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
1396 dma_buffer->data.adm_default_mux = 0x00000FC0;
1397 dma_buffer->data.nc10_flash_dev_cmd_vld_default = 0x1D;
1398 dma_buffer->data.nc10_flash_dev_cmd1_default = 0xF00F3000;
1399
1400 dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
1401 dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
1402
1403 /* chipsel_0 + enable DM interface */
1404 dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
1405 /* chipsel_1 + enable DM interface */
1406 dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
1407
1408 /* GO bit for the EXEC register */
1409 dma_buffer->data.exec = 1;
1410
1411 BUILD_BUG_ON(16 != ARRAY_SIZE(dma_buffer->data.result));
1412
1413 for (n = start_sector; n < cwperpage; n++) {
1414 /* flash + buffer status return words */
1415 dma_buffer->data.result[n].flash_status = 0xeeeeeeee;
1416 dma_buffer->data.result[n].buffer_status = 0xeeeeeeee;
1417
1418 if (n == start_sector) {
1419 if (!interleave_enable) {
1420 cmd->cmd = 0;
1421 cmd->src = msm_virt_to_dma(chip,
1422 &dma_buffer->
1423 data.nc10_flash_dev_cmd_vld);
1424 cmd->dst = NC10(MSM_NAND_DEV_CMD_VLD);
1425 cmd->len = 4;
1426 cmd++;
1427
1428 cmd->cmd = 0;
1429 cmd->src = msm_virt_to_dma(chip,
1430 &dma_buffer->data.nc10_flash_dev_cmd1);
1431 cmd->dst = NC10(MSM_NAND_DEV_CMD1);
1432 cmd->len = 4;
1433 cmd++;
1434
1435 /* NC01, NC10 --> ADDR1 */
1436 cmd->cmd = 0;
1437 cmd->src = msm_virt_to_dma(chip,
1438 &dma_buffer->data.nandc11_addr1);
1439 cmd->dst = NC11(MSM_NAND_ADDR1);
1440 cmd->len = 8;
1441 cmd++;
1442
1443 cmd->cmd = 0;
1444 cmd->src = msm_virt_to_dma(chip,
1445 &dma_buffer->data.cfg0);
1446 cmd->dst = NC11(MSM_NAND_DEV0_CFG0);
1447 if (enable_bch_ecc)
1448 cmd->len = 12;
1449 else
1450 cmd->len = 8;
1451 cmd++;
1452 } else {
1453 /* enable CS0 & CS1 */
1454 cmd->cmd = 0;
1455 cmd->src = msm_virt_to_dma(chip,
1456 &dma_buffer->
1457 data.ebi2_chip_select_cfg0);
1458 cmd->dst = EBI2_CHIP_SELECT_CFG0;
1459 cmd->len = 4;
1460 cmd++;
1461
1462 /* NC01, NC10 --> ADDR1 */
1463 cmd->cmd = 0;
1464 cmd->src = msm_virt_to_dma(chip,
1465 &dma_buffer->data.nandc11_addr1);
1466 cmd->dst = NC11(MSM_NAND_ADDR1);
1467 cmd->len = 4;
1468 cmd++;
1469
1470 /* Enable CS0 for NC01 */
1471 cmd->cmd = 0;
1472 cmd->src = msm_virt_to_dma(chip,
1473 &dma_buffer->data.chipsel_cs0);
1474 cmd->dst =
1475 NC01(MSM_NAND_FLASH_CHIP_SELECT);
1476 cmd->len = 4;
1477 cmd++;
1478
1479 /* Enable CS1 for NC10 */
1480 cmd->cmd = 0;
1481 cmd->src = msm_virt_to_dma(chip,
1482 &dma_buffer->data.chipsel_cs1);
1483 cmd->dst =
1484 NC10(MSM_NAND_FLASH_CHIP_SELECT);
1485 cmd->len = 4;
1486 cmd++;
1487
1488 /* config DEV0_CFG0 & CFG1 for CS0 */
1489 cmd->cmd = 0;
1490 cmd->src = msm_virt_to_dma(chip,
1491 &dma_buffer->data.cfg0);
1492 cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
1493 cmd->len = 8;
1494 cmd++;
1495
1496 /* config DEV1_CFG0 & CFG1 for CS1 */
1497 cmd->cmd = 0;
1498 cmd->src = msm_virt_to_dma(chip,
1499 &dma_buffer->data.cfg0);
1500 cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
1501 cmd->len = 8;
1502 cmd++;
1503 }
1504
1505 dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
1506 cmd->cmd = 0;
1507 cmd->src = msm_virt_to_dma(chip,
1508 &dma_buffer->data.ecccfg);
1509 cmd->dst = NC11(MSM_NAND_EBI2_ECC_BUF_CFG);
1510 cmd->len = 4;
1511 cmd++;
1512
1513 /* if 'only' the last code word */
1514 if (n == cwperpage - 1) {
1515 /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
1516 cmd->cmd = 0;
1517 cmd->src = msm_virt_to_dma(chip,
1518 &dma_buffer->
1519 data.adm_mux_cmd_ack_req_nc01);
1520 cmd->dst = EBI2_NAND_ADM_MUX;
1521 cmd->len = 4;
1522 cmd++;
1523
1524 /* CMD */
1525 cmd->cmd = DST_CRCI_NAND_CMD;
1526 cmd->src = msm_virt_to_dma(chip,
1527 &dma_buffer->data.cmd);
1528 cmd->dst = NC10(MSM_NAND_FLASH_CMD);
1529 cmd->len = 4;
1530 cmd++;
1531
1532 /* NC10 --> ADDR0 ( 0x0 ) */
1533 cmd->cmd = 0;
1534 cmd->src = msm_virt_to_dma(chip,
1535 &dma_buffer->data.nandc10_addr0);
1536 cmd->dst = NC10(MSM_NAND_ADDR0);
1537 cmd->len = 4;
1538 cmd++;
1539
1540 /* kick the execute reg for NC10 */
1541 cmd->cmd = 0;
1542 cmd->src = msm_virt_to_dma(chip,
1543 &dma_buffer->data.exec);
1544 cmd->dst = NC10(MSM_NAND_EXEC_CMD);
1545 cmd->len = 4;
1546 cmd++;
1547
1548 /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
1549 cmd->cmd = 0;
1550 cmd->src = msm_virt_to_dma(chip,
1551 &dma_buffer->
1552 data.adm_mux_data_ack_req_nc01);
1553 cmd->dst = EBI2_NAND_ADM_MUX;
1554 cmd->len = 4;
1555 cmd++;
1556
1557 /* block on data ready from NC10, then
1558 * read the status register
1559 */
1560 cmd->cmd = SRC_CRCI_NAND_DATA;
1561 cmd->src = NC10(MSM_NAND_FLASH_STATUS);
1562 cmd->dst = msm_virt_to_dma(chip,
1563 &dma_buffer->data.result[n]);
1564 /* MSM_NAND_FLASH_STATUS +
1565 * MSM_NAND_BUFFER_STATUS
1566 */
1567 cmd->len = 8;
1568 cmd++;
1569 } else {
1570 /* NC01 --> ADDR0 */
1571 cmd->cmd = 0;
1572 cmd->src = msm_virt_to_dma(chip,
1573 &dma_buffer->data.nandc01_addr0);
1574 cmd->dst = NC01(MSM_NAND_ADDR0);
1575 cmd->len = 4;
1576 cmd++;
1577
1578 /* NC10 --> ADDR1 */
1579 cmd->cmd = 0;
1580 cmd->src = msm_virt_to_dma(chip,
1581 &dma_buffer->data.nandc10_addr0);
1582 cmd->dst = NC10(MSM_NAND_ADDR0);
1583 cmd->len = 4;
1584 cmd++;
1585
1586 /* MASK CMD ACK/REQ --> NC10 (0xF14)*/
1587 cmd->cmd = 0;
1588 cmd->src = msm_virt_to_dma(chip,
1589 &dma_buffer->
1590 data.adm_mux_cmd_ack_req_nc10);
1591 cmd->dst = EBI2_NAND_ADM_MUX;
1592 cmd->len = 4;
1593 cmd++;
1594
1595 /* CMD */
1596 cmd->cmd = DST_CRCI_NAND_CMD;
1597 cmd->src = msm_virt_to_dma(chip,
1598 &dma_buffer->data.cmd);
1599 cmd->dst = NC01(MSM_NAND_FLASH_CMD);
1600 cmd->len = 4;
1601 cmd++;
1602
1603 /* kick the execute register for NC01*/
1604 cmd->cmd = 0;
1605 cmd->src = msm_virt_to_dma(chip,
1606 &dma_buffer->data.exec);
1607 cmd->dst = NC01(MSM_NAND_EXEC_CMD);
1608 cmd->len = 4;
1609 cmd++;
1610 }
1611 }
1612
1613 /* read data block
1614 * (only valid if status says success)
1615 */
1616 if (ops->datbuf || (ops->oobbuf &&
   1617				ops->mode != MTD_OPS_AUTO_OOB)) {
   1618				if (ops->mode != MTD_OPS_RAW)
   1619					sectordatasize = (n < (cwperpage - 1))
1620 ? 516 : (512 - ((cwperpage - 1) << 2));
1621 else
1622 sectordatasize = chip->cw_size;
1623
1624 if (n % 2 == 0) {
1625 /* MASK DATA ACK/REQ --> NC10 (0xF28)*/
1626 cmd->cmd = 0;
1627 cmd->src = msm_virt_to_dma(chip,
1628 &dma_buffer->
1629 data.adm_mux_data_ack_req_nc10);
1630 cmd->dst = EBI2_NAND_ADM_MUX;
1631 cmd->len = 4;
1632 cmd++;
1633
1634 /* block on data ready from NC01, then
1635 * read the status register
1636 */
1637 cmd->cmd = SRC_CRCI_NAND_DATA;
1638 cmd->src = NC01(MSM_NAND_FLASH_STATUS);
1639 cmd->dst = msm_virt_to_dma(chip,
1640 &dma_buffer->data.result[n]);
1641 /* MSM_NAND_FLASH_STATUS +
1642 * MSM_NAND_BUFFER_STATUS
1643 */
1644 cmd->len = 8;
1645 cmd++;
1646
1647 /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
1648 cmd->cmd = 0;
1649 cmd->src = msm_virt_to_dma(chip,
1650 &dma_buffer->
1651 data.adm_mux_cmd_ack_req_nc01);
1652 cmd->dst = EBI2_NAND_ADM_MUX;
1653 cmd->len = 4;
1654 cmd++;
1655
1656 /* CMD */
1657 cmd->cmd = DST_CRCI_NAND_CMD;
1658 cmd->src = msm_virt_to_dma(chip,
1659 &dma_buffer->data.cmd);
1660 cmd->dst = NC10(MSM_NAND_FLASH_CMD);
1661 cmd->len = 4;
1662 cmd++;
1663
1664 /* kick the execute register for NC10 */
1665 cmd->cmd = 0;
1666 cmd->src = msm_virt_to_dma(chip,
1667 &dma_buffer->data.exec);
1668 cmd->dst = NC10(MSM_NAND_EXEC_CMD);
1669 cmd->len = 4;
1670 cmd++;
1671
1672 /* Read only when there is data
1673 * buffer
1674 */
1675 if (ops->datbuf) {
1676 cmd->cmd = 0;
1677 cmd->src =
1678 NC01(MSM_NAND_FLASH_BUFFER);
1679 cmd->dst = data_dma_addr_curr;
1680 data_dma_addr_curr +=
1681 sectordatasize;
1682 cmd->len = sectordatasize;
1683 cmd++;
1684 }
1685 } else {
1686 /* MASK DATA ACK/REQ -->
1687 * NC01 (0xA3C)
1688 */
1689 cmd->cmd = 0;
1690 cmd->src = msm_virt_to_dma(chip,
1691 &dma_buffer->
1692 data.adm_mux_data_ack_req_nc01);
1693 cmd->dst = EBI2_NAND_ADM_MUX;
1694 cmd->len = 4;
1695 cmd++;
1696
1697 /* block on data ready from NC10
1698 * then read the status register
1699 */
1700 cmd->cmd = SRC_CRCI_NAND_DATA;
1701 cmd->src =
1702 NC10(MSM_NAND_FLASH_STATUS);
1703 cmd->dst = msm_virt_to_dma(chip,
1704 &dma_buffer->data.result[n]);
1705 /* MSM_NAND_FLASH_STATUS +
1706 * MSM_NAND_BUFFER_STATUS
1707 */
1708 cmd->len = 8;
1709 cmd++;
1710 if (n != cwperpage - 1) {
1711 /* MASK CMD ACK/REQ -->
1712 * NC10 (0xF14)
1713 */
1714 cmd->cmd = 0;
1715 cmd->src =
1716 msm_virt_to_dma(chip,
1717 &dma_buffer->
1718 data.adm_mux_cmd_ack_req_nc10);
1719 cmd->dst = EBI2_NAND_ADM_MUX;
1720 cmd->len = 4;
1721 cmd++;
1722
1723 /* CMD */
1724 cmd->cmd = DST_CRCI_NAND_CMD;
1725 cmd->src = msm_virt_to_dma(chip,
1726 &dma_buffer->data.cmd);
1727 cmd->dst =
1728 NC01(MSM_NAND_FLASH_CMD);
1729 cmd->len = 4;
1730 cmd++;
1731
1732 /* EXEC */
1733 cmd->cmd = 0;
1734 cmd->src = msm_virt_to_dma(chip,
1735 &dma_buffer->data.exec);
1736 cmd->dst =
1737 NC01(MSM_NAND_EXEC_CMD);
1738 cmd->len = 4;
1739 cmd++;
1740 }
1741
1742 /* Read only when there is data
1743 * buffer
1744 */
1745 if (ops->datbuf) {
1746 cmd->cmd = 0;
1747 cmd->src =
1748 NC10(MSM_NAND_FLASH_BUFFER);
1749 cmd->dst = data_dma_addr_curr;
1750 data_dma_addr_curr +=
1751 sectordatasize;
1752 cmd->len = sectordatasize;
1753 cmd++;
1754 }
1755 }
1756 }
1757
1758 if (ops->oobbuf && (n == (cwperpage - 1)
Steve Mucklef132c6c2012-06-06 18:30:57 -07001759 || ops->mode != MTD_OPS_AUTO_OOB)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001760 cmd->cmd = 0;
1761 if (n == (cwperpage - 1)) {
1762 /* Use NC10 for reading the
1763 * last codeword!!!
1764 */
1765 cmd->src = NC10(MSM_NAND_FLASH_BUFFER) +
1766 (512 - ((cwperpage - 1) << 2));
1767 sectoroobsize = (cwperpage << 2);
   1768					if (ops->mode != MTD_OPS_AUTO_OOB)
   1769						sectoroobsize +=
1770 chip->ecc_parity_bytes;
1771 } else {
1772 if (n % 2 == 0)
1773 cmd->src =
1774 NC01(MSM_NAND_FLASH_BUFFER)
1775 + 516;
1776 else
1777 cmd->src =
1778 NC10(MSM_NAND_FLASH_BUFFER)
1779 + 516;
1780 sectoroobsize = chip->ecc_parity_bytes;
1781 }
1782 cmd->dst = oob_dma_addr_curr;
1783 if (sectoroobsize < oob_len)
1784 cmd->len = sectoroobsize;
1785 else
1786 cmd->len = oob_len;
1787 oob_dma_addr_curr += cmd->len;
1788 oob_len -= cmd->len;
1789 if (cmd->len > 0)
1790 cmd++;
1791 }
1792 }
1793 /* ADM --> Default mux state (0xFC0) */
1794 cmd->cmd = 0;
1795 cmd->src = msm_virt_to_dma(chip,
1796 &dma_buffer->data.adm_default_mux);
1797 cmd->dst = EBI2_NAND_ADM_MUX;
1798 cmd->len = 4;
1799 cmd++;
1800
1801 if (!interleave_enable) {
1802 cmd->cmd = 0;
1803 cmd->src = msm_virt_to_dma(chip,
1804 &dma_buffer->data.nc10_flash_dev_cmd_vld_default);
1805 cmd->dst = NC10(MSM_NAND_DEV_CMD_VLD);
1806 cmd->len = 4;
1807 cmd++;
1808
1809 cmd->cmd = 0;
1810 cmd->src = msm_virt_to_dma(chip,
1811 &dma_buffer->data.nc10_flash_dev_cmd1_default);
1812 cmd->dst = NC10(MSM_NAND_DEV_CMD1);
1813 cmd->len = 4;
1814 cmd++;
1815 } else {
1816 /* disable CS1 */
1817 cmd->cmd = 0;
1818 cmd->src = msm_virt_to_dma(chip,
1819 &dma_buffer->data.default_ebi2_chip_select_cfg0);
1820 cmd->dst = EBI2_CHIP_SELECT_CFG0;
1821 cmd->len = 4;
1822 cmd++;
1823 }
1824
1825 BUILD_BUG_ON(16 * 6 + 20 != ARRAY_SIZE(dma_buffer->cmd));
1826 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
1827 dma_buffer->cmd[0].cmd |= CMD_OCB;
1828 cmd[-1].cmd |= CMD_OCU | CMD_LC;
1829
1830 dma_buffer->cmdptr =
1831 (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
1832 | CMD_PTR_LP;
1833
1834 mb();
   1835		msm_dmov_exec_cmd(chip->dma_channel,
   1836			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
1837 &dma_buffer->cmdptr)));
1838 mb();
1839
1840 /* if any of the writes failed (0x10), or there
1841 * was a protection violation (0x100), we lose
1842 */
1843 pageerr = rawerr = 0;
1844 for (n = start_sector; n < cwperpage; n++) {
1845 if (dma_buffer->data.result[n].flash_status & 0x110) {
1846 rawerr = -EIO;
1847 break;
1848 }
1849 }
1850 if (rawerr) {
   1851			if (ops->datbuf && ops->mode != MTD_OPS_RAW) {
   1852				uint8_t *datbuf = ops->datbuf +
1853 pages_read * mtd->writesize;
1854
1855 dma_sync_single_for_cpu(chip->dev,
1856 data_dma_addr_curr-mtd->writesize,
1857 mtd->writesize, DMA_BIDIRECTIONAL);
1858
1859 for (n = 0; n < mtd->writesize; n++) {
1860 /* empty blocks read 0x54 at
1861 * these offsets
1862 */
1863 if ((n % 516 == 3 || n % 516 == 175)
1864 && datbuf[n] == 0x54)
1865 datbuf[n] = 0xff;
1866 if (datbuf[n] != 0xff) {
1867 pageerr = rawerr;
1868 break;
1869 }
1870 }
1871
1872 dma_sync_single_for_device(chip->dev,
1873 data_dma_addr_curr-mtd->writesize,
1874 mtd->writesize, DMA_BIDIRECTIONAL);
1875
1876 }
1877 if (ops->oobbuf) {
1878 dma_sync_single_for_cpu(chip->dev,
1879 oob_dma_addr_curr - (ops->ooblen - oob_len),
1880 ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
1881
1882 for (n = 0; n < ops->ooblen; n++) {
1883 if (ops->oobbuf[n] != 0xff) {
1884 pageerr = rawerr;
1885 break;
1886 }
1887 }
1888
1889 dma_sync_single_for_device(chip->dev,
1890 oob_dma_addr_curr - (ops->ooblen - oob_len),
1891 ops->ooblen - oob_len, DMA_BIDIRECTIONAL);
1892 }
1893 }
1894 if (pageerr) {
1895 for (n = start_sector; n < cwperpage; n++) {
1896 if (dma_buffer->data.result[n].buffer_status
1897 & chip->uncorrectable_bit_mask) {
1898 /* not thread safe */
1899 mtd->ecc_stats.failed++;
1900 pageerr = -EBADMSG;
1901 break;
1902 }
1903 }
1904 }
1905 if (!rawerr) { /* check for correctable errors */
1906 for (n = start_sector; n < cwperpage; n++) {
1907 ecc_errors = dma_buffer->data.
1908 result[n].buffer_status
1909 & chip->num_err_mask;
1910 if (ecc_errors) {
1911 total_ecc_errors += ecc_errors;
1912 /* not thread safe */
1913 mtd->ecc_stats.corrected += ecc_errors;
1914 if (ecc_errors > 1)
1915 pageerr = -EUCLEAN;
1916 }
1917 }
1918 }
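/* Propagate the most severe per-page result: -EUCLEAN from a
 * corrected page must not overwrite a hard failure already recorded
 * in err for an earlier page.
 */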
1919 if (pageerr && (pageerr != -EUCLEAN || err == 0))
1920 err = pageerr;
1921
1922#if VERBOSE
1923 if (rawerr && !pageerr) {
1924 pr_err("msm_nand_read_oob_dualnandc "
1925 "%llx %x %x empty page\n",
1926 (loff_t)page * mtd->writesize, ops->len,
1927 ops->ooblen);
1928 } else {
1929 for (n = start_sector; n < cwperpage; n++) {
1930 if (n%2) {
1931 pr_info("NC10: flash_status[%d] = %x, "
1932 "buffr_status[%d] = %x\n",
1933 n, dma_buffer->
1934 data.result[n].flash_status,
1935 n, dma_buffer->
1936 data.result[n].buffer_status);
1937 } else {
1938 pr_info("NC01: flash_status[%d] = %x, "
1939 "buffr_status[%d] = %x\n",
1940 n, dma_buffer->
1941 data.result[n].flash_status,
1942 n, dma_buffer->
1943 data.result[n].buffer_status);
1944 }
1945 }
1946 }
1947#endif
1948 if (err && err != -EUCLEAN && err != -EBADMSG)
1949 break;
1950 pages_read++;
1951 page++;
1952 }
1953
1954 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
1955
1956 if (ops->oobbuf) {
1957 dma_unmap_page(chip->dev, oob_dma_addr,
1958 ops->ooblen, DMA_FROM_DEVICE);
1959 }
1960err_dma_map_oobbuf_failed:
1961 if (ops->datbuf) {
1962 dma_unmap_page(chip->dev, data_dma_addr,
1963 ops->len, DMA_BIDIRECTIONAL);
1964 }
1965
1966 if (ops->mode != MTD_OPS_RAW)
1967 ops->retlen = mtd->writesize * pages_read;
1968 else
1969 ops->retlen = (mtd->writesize + mtd->oobsize) *
1970 pages_read;
1971 ops->oobretlen = ops->ooblen - oob_len;
1972 if (err)
1973 pr_err("msm_nand_read_oob_dualnandc "
1974 "%llx %x %x failed %d, corrected %d\n",
1975 from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
1976 total_ecc_errors);
1977#if VERBOSE
1978 pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
1979 __func__, err, ops->retlen, ops->oobretlen);
1980
1981 pr_info("==================================================="
1982 "==========\n");
1983#endif
1984 return err;
1985}
1986
1987static int
1988msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1989 size_t *retlen, u_char *buf)
1990{
1991 int ret;
1992 struct mtd_oob_ops ops;
1993 int (*read_oob)(struct mtd_info *, loff_t, struct mtd_oob_ops *);
1994
1995 if (!dual_nand_ctlr_present)
1996 read_oob = msm_nand_read_oob;
1997 else
1998 read_oob = msm_nand_read_oob_dualnandc;
1999
2000 ops.mode = MTD_OPS_PLACE_OOB;
2001 ops.retlen = 0;
2002 ops.ooblen = 0;
2003 ops.oobbuf = NULL;
2004 ret = 0;
2005 *retlen = 0;
2006
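/* Fast path: a single page-aligned, page-sized read goes straight to
 * the controller. Everything else (unaligned offset, partial page,
 * multiple pages) is looped through a one-page bounce buffer below and
 * copied out piecewise.
 */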
2007 if ((from & (mtd->writesize - 1)) == 0 && len == mtd->writesize) {
2008 /* reading a page on page boundary */
2009 ops.len = len;
2010 ops.datbuf = buf;
2011 ret = read_oob(mtd, from, &ops);
2012 *retlen = ops.retlen;
2013 } else if (len > 0) {
2014 /* reading any size on any offset. partial page is supported */
2015 u8 *bounce_buf;
2016 loff_t aligned_from;
2017 loff_t offset;
2018 size_t actual_len;
2019
2020 bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
2021 if (!bounce_buf) {
2022 pr_err("%s: could not allocate memory\n", __func__);
2023 ret = -ENOMEM;
2024 goto out;
2025 }
2026
2027 ops.len = mtd->writesize;
2028 offset = from & (mtd->writesize - 1);
2029 aligned_from = from - offset;
2030
2031 for (;;) {
2032 int no_copy;
2033
2034 actual_len = mtd->writesize - offset;
2035 if (actual_len > len)
2036 actual_len = len;
2037
2038 no_copy = (offset == 0 && actual_len == mtd->writesize);
2039 ops.datbuf = (no_copy) ? buf : bounce_buf;
2040 ret = read_oob(mtd, aligned_from, &ops);
2041 if (ret < 0)
2042 break;
2043
2044 if (!no_copy)
2045 memcpy(buf, bounce_buf + offset, actual_len);
2046
2047 len -= actual_len;
2048 *retlen += actual_len;
2049 if (len == 0)
2050 break;
2051
2052 buf += actual_len;
2053 offset = 0;
2054 aligned_from += mtd->writesize;
2055 }
2056
2057 kfree(bounce_buf);
2058 }
2059
2060out:
2061 return ret;
2062}
2063
2064static int
2065msm_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
2066{
2067 struct msm_nand_chip *chip = mtd->priv;
2068 struct {
2069 dmov_s cmd[8 * 7 + 2];
2070 unsigned cmdptr;
2071 struct {
2072 uint32_t cmd;
2073 uint32_t addr0;
2074 uint32_t addr1;
2075 uint32_t chipsel;
2076 uint32_t cfg0;
2077 uint32_t cfg1;
2078 uint32_t eccbchcfg;
2079 uint32_t exec;
2080 uint32_t ecccfg;
2081 uint32_t clrfstatus;
2082 uint32_t clrrstatus;
2083 uint32_t flash_status[8];
2084 } data;
2085 } *dma_buffer;
2086 dmov_s *cmd;
2087 unsigned n;
2088 unsigned page = 0;
2089 uint32_t oob_len;
2090 uint32_t sectordatawritesize;
2091 int err = 0;
2092 dma_addr_t data_dma_addr = 0;
2093 dma_addr_t oob_dma_addr = 0;
2094 dma_addr_t data_dma_addr_curr = 0;
2095 dma_addr_t oob_dma_addr_curr = 0;
2096 unsigned page_count;
2097 unsigned pages_written = 0;
2098 unsigned cwperpage;
2099#if VERBOSE
2100 pr_info("================================================="
2101 "================\n");
2102 pr_info("%s:\nto 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
2103 "\noobbuf 0x%p ooblen 0x%x\n",
2104 __func__, to, ops->mode, ops->datbuf, ops->len,
2105 ops->oobbuf, ops->ooblen);
2106#endif
2107
2108 if (mtd->writesize == 2048)
2109 page = to >> 11;
2110
2111 if (mtd->writesize == 4096)
2112 page = to >> 12;
2113
2114 oob_len = ops->ooblen;
2115 cwperpage = (mtd->writesize >> 9);
2116
2117 if (to & (mtd->writesize - 1)) {
2118 pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
2119 return -EINVAL;
2120 }
2121
2122 if (ops->mode != MTD_OPS_RAW) {
2123 if (ops->ooblen != 0 && ops->mode != MTD_OPS_AUTO_OOB) {
2124 pr_err("%s: unsupported ops->mode,%d\n",
2125 __func__, ops->mode);
2126 return -EINVAL;
2127 }
2128 if ((ops->len % mtd->writesize) != 0) {
2129 pr_err("%s: unsupported ops->len, %d\n",
2130 __func__, ops->len);
2131 return -EINVAL;
2132 }
2133 } else {
2134 if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
2135 pr_err("%s: unsupported ops->len, "
2136 "%d for MTD_OPS_RAW mode\n",
2137 __func__, ops->len);
2138 return -EINVAL;
2139 }
2140 }
2141
2142 if (ops->datbuf == NULL) {
2143 pr_err("%s: unsupported ops->datbuf == NULL\n", __func__);
2144 return -EINVAL;
2145 }
2146 if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
2147 pr_err("%s: unsupported ops->ooboffs, %d\n",
2148 __func__, ops->ooboffs);
2149 return -EINVAL;
2150 }
2151
2152 if (ops->datbuf) {
2153 data_dma_addr_curr = data_dma_addr =
2154 msm_nand_dma_map(chip->dev, ops->datbuf,
2155 ops->len, DMA_TO_DEVICE);
2156 if (dma_mapping_error(chip->dev, data_dma_addr)) {
2157 pr_err("msm_nand_write_oob: failed to get dma addr "
2158 "for %p\n", ops->datbuf);
2159 return -EIO;
2160 }
2161 }
2162 if (ops->oobbuf) {
2163 oob_dma_addr_curr = oob_dma_addr =
2164 msm_nand_dma_map(chip->dev, ops->oobbuf,
2165 ops->ooblen, DMA_TO_DEVICE);
2166 if (dma_mapping_error(chip->dev, oob_dma_addr)) {
2167 pr_err("msm_nand_write_oob: failed to get dma addr "
2168 "for %p\n", ops->oobbuf);
2169 err = -EIO;
2170 goto err_dma_map_oobbuf_failed;
2171 }
2172 }
2173 if (ops->mode != MTD_OPS_RAW)
2174 page_count = ops->len / mtd->writesize;
2175 else
2176 page_count = ops->len / (mtd->writesize + mtd->oobsize);
2177
2178 wait_event(chip->wait_queue, (dma_buffer =
2179 msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
2180
2181 while (page_count-- > 0) {
2182 cmd = dma_buffer->cmd;
2183
2184 if (ops->mode != MTD_OPS_RAW) {
2185 dma_buffer->data.cfg0 = chip->CFG0;
2186 dma_buffer->data.cfg1 = chip->CFG1;
2187 if (enable_bch_ecc)
2188 dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
2189 } else {
2190 dma_buffer->data.cfg0 = (chip->CFG0_RAW &
2191 ~(7U << 6)) | ((cwperpage-1) << 6);
2192 dma_buffer->data.cfg1 = chip->CFG1_RAW |
2193 (chip->CFG1 & CFG1_WIDE_FLASH);
2194 }
2195
2196 /* CMD / ADDR0 / ADDR1 / CHIPSEL program values */
2197 dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE;
2198 dma_buffer->data.addr0 = page << 16;
2199 dma_buffer->data.addr1 = (page >> 16) & 0xff;
2200 /* chipsel_0 + enable DM interface */
2201 dma_buffer->data.chipsel = 0 | 4;
2202
2203
2204 /* GO bit for the EXEC register */
2205 dma_buffer->data.exec = 1;
2206 dma_buffer->data.clrfstatus = 0x00000020;
2207 dma_buffer->data.clrrstatus = 0x000000C0;
2208
2209 BUILD_BUG_ON(8 != ARRAY_SIZE(dma_buffer->data.flash_status));
2210
2211 for (n = 0; n < cwperpage ; n++) {
2212 /* status return words */
2213 dma_buffer->data.flash_status[n] = 0xeeeeeeee;
2214 /* block on cmd ready, then
2215 * write CMD / ADDR0 / ADDR1 / CHIPSEL regs in a burst
2216 */
2217 cmd->cmd = DST_CRCI_NAND_CMD;
2218 cmd->src =
2219 msm_virt_to_dma(chip, &dma_buffer->data.cmd);
2220 cmd->dst = MSM_NAND_FLASH_CMD;
2221 if (n == 0)
2222 cmd->len = 16;
2223 else
2224 cmd->len = 4;
2225 cmd++;
2226
2227 if (n == 0) {
2228 cmd->cmd = 0;
2229 cmd->src = msm_virt_to_dma(chip,
2230 &dma_buffer->data.cfg0);
2231 cmd->dst = MSM_NAND_DEV0_CFG0;
2232 if (enable_bch_ecc)
2233 cmd->len = 12;
2234 else
2235 cmd->len = 8;
2236 cmd++;
2237
2238 dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
2239 cmd->cmd = 0;
2240 cmd->src = msm_virt_to_dma(chip,
2241 &dma_buffer->data.ecccfg);
2242 cmd->dst = MSM_NAND_EBI2_ECC_BUF_CFG;
2243 cmd->len = 4;
2244 cmd++;
2245 }
2246
2247 /* write data block */
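/* In ECC mode each codeword carries 516 bytes of user data except the
 * last, which is shortened so the page total equals mtd->writesize
 * (e.g. 3 * 516 + 500 = 2048). In raw mode the full codeword,
 * including spare bytes, is written unmodified.
 */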
2248 if (ops->mode != MTD_OPS_RAW)
2249 sectordatawritesize = (n < (cwperpage - 1)) ?
2250 516 : (512 - ((cwperpage - 1) << 2));
2251 else
2252 sectordatawritesize = chip->cw_size;
2253
2254 cmd->cmd = 0;
2255 cmd->src = data_dma_addr_curr;
2256 data_dma_addr_curr += sectordatawritesize;
2257 cmd->dst = MSM_NAND_FLASH_BUFFER;
2258 cmd->len = sectordatawritesize;
2259 cmd++;
2260
2261 if (ops->oobbuf) {
2262 if (n == (cwperpage - 1)) {
2263 cmd->cmd = 0;
2264 cmd->src = oob_dma_addr_curr;
2265 cmd->dst = MSM_NAND_FLASH_BUFFER +
2266 (512 - ((cwperpage - 1) << 2));
2267 if ((cwperpage << 2) < oob_len)
2268 cmd->len = (cwperpage << 2);
2269 else
2270 cmd->len = oob_len;
2271 oob_dma_addr_curr += cmd->len;
2272 oob_len -= cmd->len;
2273 if (cmd->len > 0)
2274 cmd++;
2275 }
2276 if (ops->mode != MTD_OPS_AUTO_OOB) {
2277 /* skip ecc bytes in oobbuf */
2278 if (oob_len < chip->ecc_parity_bytes) {
2279 oob_dma_addr_curr +=
2280 chip->ecc_parity_bytes;
2281 oob_len -=
2282 chip->ecc_parity_bytes;
2283 } else {
2284 oob_dma_addr_curr += oob_len;
2285 oob_len = 0;
2286 }
2287 }
2288 }
2289
2290 /* kick the execute register */
2291 cmd->cmd = 0;
2292 cmd->src =
2293 msm_virt_to_dma(chip, &dma_buffer->data.exec);
2294 cmd->dst = MSM_NAND_EXEC_CMD;
2295 cmd->len = 4;
2296 cmd++;
2297
2298 /* block on data ready, then
2299 * read the status register
2300 */
2301 cmd->cmd = SRC_CRCI_NAND_DATA;
2302 cmd->src = MSM_NAND_FLASH_STATUS;
2303 cmd->dst = msm_virt_to_dma(chip,
2304 &dma_buffer->data.flash_status[n]);
2305 cmd->len = 4;
2306 cmd++;
2307
2308 cmd->cmd = 0;
2309 cmd->src = msm_virt_to_dma(chip,
2310 &dma_buffer->data.clrfstatus);
2311 cmd->dst = MSM_NAND_FLASH_STATUS;
2312 cmd->len = 4;
2313 cmd++;
2314
2315 cmd->cmd = 0;
2316 cmd->src = msm_virt_to_dma(chip,
2317 &dma_buffer->data.clrrstatus);
2318 cmd->dst = MSM_NAND_READ_STATUS;
2319 cmd->len = 4;
2320 cmd++;
2321
2322 }
2323
2324 dma_buffer->cmd[0].cmd |= CMD_OCB;
2325 cmd[-1].cmd |= CMD_OCU | CMD_LC;
2326 BUILD_BUG_ON(8 * 7 + 2 != ARRAY_SIZE(dma_buffer->cmd));
2327 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
2328 dma_buffer->cmdptr =
2329 (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) |
2330 CMD_PTR_LP;
2331
2332 mb();
2333 msm_dmov_exec_cmd(chip->dma_channel,
2334 DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(
2335 msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
2336 mb();
2337
2338 /* if any of the writes failed (0x10), or there was a
2339 * protection violation (0x100), or the program success
2340 * bit (0x80) is unset, we lose
2341 */
2342 err = 0;
2343 for (n = 0; n < cwperpage; n++) {
2344 if (dma_buffer->data.flash_status[n] & 0x110) {
2345 err = -EIO;
2346 break;
2347 }
2348 if (!(dma_buffer->data.flash_status[n] & 0x80)) {
2349 err = -EIO;
2350 break;
2351 }
2352 }
2353
2354#if VERBOSE
2355 for (n = 0; n < cwperpage; n++)
2356 pr_info("write pg %d: flash_status[%d] = %x\n", page,
2357 n, dma_buffer->data.flash_status[n]);
2358
2359#endif
2360 if (err)
2361 break;
2362 pages_written++;
2363 page++;
2364 }
2365 if (ops->mode != MTD_OPS_RAW)
2366 ops->retlen = mtd->writesize * pages_written;
2367 else
2368 ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
2369
2370 ops->oobretlen = ops->ooblen - oob_len;
2371
2372 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
2373
2374 if (ops->oobbuf)
2375 dma_unmap_page(chip->dev, oob_dma_addr,
2376 ops->ooblen, DMA_TO_DEVICE);
2377err_dma_map_oobbuf_failed:
2378 if (ops->datbuf)
2379 dma_unmap_page(chip->dev, data_dma_addr, ops->len,
2380 DMA_TO_DEVICE);
2381 if (err)
2382 pr_err("msm_nand_write_oob %llx %x %x failed %d\n",
2383 to, ops->len, ops->ooblen, err);
2384
2385#if VERBOSE
2386 pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
2387 __func__, err, ops->retlen, ops->oobretlen);
2388
2389 pr_info("==================================================="
2390 "==============\n");
2391#endif
2392 return err;
2393}
2394
2395static int
2396msm_nand_write_oob_dualnandc(struct mtd_info *mtd, loff_t to,
2397 struct mtd_oob_ops *ops)
2398{
2399 struct msm_nand_chip *chip = mtd->priv;
2400 struct {
2401 dmov_s cmd[16 * 6 + 18];
2402 unsigned cmdptr;
2403 struct {
2404 uint32_t cmd;
2405 uint32_t nandc01_addr0;
2406 uint32_t nandc10_addr0;
2407 uint32_t nandc11_addr1;
2408 uint32_t chipsel_cs0;
2409 uint32_t chipsel_cs1;
2410 uint32_t cfg0;
2411 uint32_t cfg1;
2412 uint32_t eccbchcfg;
2413 uint32_t exec;
2414 uint32_t ecccfg;
2415 uint32_t cfg0_nc01;
2416 uint32_t ebi2_chip_select_cfg0;
2417 uint32_t adm_mux_data_ack_req_nc01;
2418 uint32_t adm_mux_cmd_ack_req_nc01;
2419 uint32_t adm_mux_data_ack_req_nc10;
2420 uint32_t adm_mux_cmd_ack_req_nc10;
2421 uint32_t adm_default_mux;
2422 uint32_t default_ebi2_chip_select_cfg0;
2423 uint32_t nc01_flash_dev_cmd_vld;
2424 uint32_t nc10_flash_dev_cmd0;
2425 uint32_t nc01_flash_dev_cmd_vld_default;
2426 uint32_t nc10_flash_dev_cmd0_default;
2427 uint32_t flash_status[16];
2428 uint32_t clrfstatus;
2429 uint32_t clrrstatus;
2430 } data;
2431 } *dma_buffer;
2432 dmov_s *cmd;
2433 unsigned n;
2434 unsigned page = 0;
2435 uint32_t oob_len;
2436 uint32_t sectordatawritesize;
2437 int err = 0;
2438 dma_addr_t data_dma_addr = 0;
2439 dma_addr_t oob_dma_addr = 0;
2440 dma_addr_t data_dma_addr_curr = 0;
2441 dma_addr_t oob_dma_addr_curr = 0;
2442 unsigned page_count;
2443 unsigned pages_written = 0;
2444 unsigned cwperpage;
2445 unsigned cw_offset = chip->cw_size;
2446#if VERBOSE
2447 pr_info("================================================="
2448 "============\n");
2449 pr_info("%s:\nto 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x"
2450 "\noobbuf 0x%p ooblen 0x%x\n\n",
2451 __func__, to, ops->mode, ops->datbuf, ops->len,
2452 ops->oobbuf, ops->ooblen);
2453#endif
2454
2455 if (mtd->writesize == 2048)
2456 page = to >> 11;
2457
2458 if (mtd->writesize == 4096)
2459 page = to >> 12;
2460
2461 if (interleave_enable)
2462 page = (to >> 1) >> 12;
2463
2464 oob_len = ops->ooblen;
2465 cwperpage = (mtd->writesize >> 9);
2466
2467 if (to & (mtd->writesize - 1)) {
2468 pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
2469 return -EINVAL;
2470 }
2471
2472 if (ops->mode != MTD_OPS_RAW) {
2473 if (ops->ooblen != 0 && ops->mode != MTD_OPS_AUTO_OOB) {
2474 pr_err("%s: unsupported ops->mode,%d\n",
2475 __func__, ops->mode);
2476 return -EINVAL;
2477 }
2478 if ((ops->len % mtd->writesize) != 0) {
2479 pr_err("%s: unsupported ops->len, %d\n",
2480 __func__, ops->len);
2481 return -EINVAL;
2482 }
2483 } else {
2484 if ((ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
2485 pr_err("%s: unsupported ops->len, "
2486 "%d for MTD_OPS_RAW mode\n",
2487 __func__, ops->len);
2488 return -EINVAL;
2489 }
2490 }
2491
2492 if (ops->datbuf == NULL) {
2493 pr_err("%s: unsupported ops->datbuf == NULL\n", __func__);
2494 return -EINVAL;
2495 }
2496
2497 if (ops->mode != MTD_OPS_RAW && ops->ooblen != 0 && ops->ooboffs != 0) {
2498 pr_err("%s: unsupported ops->ooboffs, %d\n",
2499 __func__, ops->ooboffs);
2500 return -EINVAL;
2501 }
2502
2503 if (ops->datbuf) {
2504 data_dma_addr_curr = data_dma_addr =
2505 msm_nand_dma_map(chip->dev, ops->datbuf,
2506 ops->len, DMA_TO_DEVICE);
2507 if (dma_mapping_error(chip->dev, data_dma_addr)) {
2508 pr_err("msm_nand_write_oob_dualnandc:"
2509 "failed to get dma addr "
2510 "for %p\n", ops->datbuf);
2511 return -EIO;
2512 }
2513 }
2514 if (ops->oobbuf) {
2515 oob_dma_addr_curr = oob_dma_addr =
2516 msm_nand_dma_map(chip->dev, ops->oobbuf,
2517 ops->ooblen, DMA_TO_DEVICE);
2518 if (dma_mapping_error(chip->dev, oob_dma_addr)) {
2519 pr_err("msm_nand_write_oob_dualnandc:"
2520 "failed to get dma addr "
2521 "for %p\n", ops->oobbuf);
2522 err = -EIO;
2523 goto err_dma_map_oobbuf_failed;
2524 }
2525 }
2526 if (ops->mode != MTD_OPS_RAW)
2527 page_count = ops->len / mtd->writesize;
2528 else
2529 page_count = ops->len / (mtd->writesize + mtd->oobsize);
2530
2531 wait_event(chip->wait_queue, (dma_buffer =
2532 msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
2533
2534 if (chip->CFG1 & CFG1_WIDE_FLASH)
2535 cw_offset >>= 1;
2536
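/* Pre-computed register images DMAed to the EBI2/NANDC blocks during
 * the transfer: chip-select enable/disable values, ADM mux settings
 * that route command/data ACK/REQ to NC01 or NC10, and the values used
 * to clear the flash and read status registers after each codeword.
 */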
2537 dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
2538 dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
2539 dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
2540 dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
2541 dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
2542 dma_buffer->data.adm_default_mux = 0x00000FC0;
2543 dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
2544 dma_buffer->data.nc01_flash_dev_cmd_vld = 0x9;
2545 dma_buffer->data.nc10_flash_dev_cmd0 = 0x1085D060;
2546 dma_buffer->data.nc01_flash_dev_cmd_vld_default = 0x1D;
2547 dma_buffer->data.nc10_flash_dev_cmd0_default = 0x1080D060;
2548 dma_buffer->data.clrfstatus = 0x00000020;
2549 dma_buffer->data.clrrstatus = 0x000000C0;
2550
2551 while (page_count-- > 0) {
2552 cmd = dma_buffer->cmd;
2553
2554 if (ops->mode != MTD_OPS_RAW) {
2555 dma_buffer->data.cfg0 = ((chip->CFG0 & ~(7U << 6))
2556 & ~(1 << 4)) | ((((cwperpage >> 1)-1)) << 6);
2557 dma_buffer->data.cfg1 = chip->CFG1;
2558 if (enable_bch_ecc)
2559 dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
2560 } else {
2561 dma_buffer->data.cfg0 = ((chip->CFG0_RAW &
2562 ~(7U << 6)) & ~(1 << 4)) | (((cwperpage >> 1)-1) << 6);
2563 dma_buffer->data.cfg1 = chip->CFG1_RAW |
2564 (chip->CFG1 & CFG1_WIDE_FLASH);
2565 }
2566
2567 /* Disables the automatic issuing of the read
2568 * status command for first NAND controller.
2569 */
2570 if (!interleave_enable)
2571 dma_buffer->data.cfg0_nc01 = dma_buffer->data.cfg0
2572 | (1 << 4);
2573 else
2574 dma_buffer->data.cfg0 |= (1 << 4);
2575
2576 dma_buffer->data.cmd = MSM_NAND_CMD_PRG_PAGE;
2577 dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
2578 dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
2579
2580 /* GO bit for the EXEC register */
2581 dma_buffer->data.exec = 1;
2582
2583 if (!interleave_enable) {
2584 dma_buffer->data.nandc01_addr0 = (page << 16) | 0x0;
2585 /* NC10 ADDR0 points to the next code word */
2586 dma_buffer->data.nandc10_addr0 =
2587 (page << 16) | cw_offset;
2588 } else {
2589 dma_buffer->data.nandc01_addr0 =
2590 dma_buffer->data.nandc10_addr0 = (page << 16) | 0x0;
2591 }
2592 /* ADDR1 */
2593 dma_buffer->data.nandc11_addr1 = (page >> 16) & 0xff;
2594
2595 BUILD_BUG_ON(16 != ARRAY_SIZE(dma_buffer->data.flash_status));
2596
2597 for (n = 0; n < cwperpage; n++) {
2598 /* status return words */
2599 dma_buffer->data.flash_status[n] = 0xeeeeeeee;
2600
2601 if (n == 0) {
2602 if (!interleave_enable) {
2603 cmd->cmd = 0;
2604 cmd->src = msm_virt_to_dma(chip,
2605 &dma_buffer->
2606 data.nc01_flash_dev_cmd_vld);
2607 cmd->dst = NC01(MSM_NAND_DEV_CMD_VLD);
2608 cmd->len = 4;
2609 cmd++;
2610
2611 cmd->cmd = 0;
2612 cmd->src = msm_virt_to_dma(chip,
2613 &dma_buffer->data.nc10_flash_dev_cmd0);
2614 cmd->dst = NC10(MSM_NAND_DEV_CMD0);
2615 cmd->len = 4;
2616 cmd++;
2617
2618 /* common settings for both NC01 & NC10
2619 * NC01, NC10 --> ADDR1 / CHIPSEL
2620 */
2621 cmd->cmd = 0;
2622 cmd->src = msm_virt_to_dma(chip,
2623 &dma_buffer->data.nandc11_addr1);
2624 cmd->dst = NC11(MSM_NAND_ADDR1);
2625 cmd->len = 8;
2626 cmd++;
2627
2628 /* Disables the automatic issue of the
2629 * read status command after the write
2630 * operation.
2631 */
2632 cmd->cmd = 0;
2633 cmd->src = msm_virt_to_dma(chip,
2634 &dma_buffer->data.cfg0_nc01);
2635 cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
2636 cmd->len = 4;
2637 cmd++;
2638
2639 cmd->cmd = 0;
2640 cmd->src = msm_virt_to_dma(chip,
2641 &dma_buffer->data.cfg0);
2642 cmd->dst = NC10(MSM_NAND_DEV0_CFG0);
2643 cmd->len = 4;
2644 cmd++;
2645
2646 cmd->cmd = 0;
2647 cmd->src = msm_virt_to_dma(chip,
2648 &dma_buffer->data.cfg1);
2649 cmd->dst = NC11(MSM_NAND_DEV0_CFG1);
2650 if (enable_bch_ecc)
2651 cmd->len = 8;
2652 else
2653 cmd->len = 4;
2654 cmd++;
2655 } else {
2656 /* enable CS1 */
2657 cmd->cmd = 0;
2658 cmd->src = msm_virt_to_dma(chip,
2659 &dma_buffer->
2660 data.ebi2_chip_select_cfg0);
2661 cmd->dst = EBI2_CHIP_SELECT_CFG0;
2662 cmd->len = 4;
2663 cmd++;
2664
2665 /* NC11 --> ADDR1 */
2666 cmd->cmd = 0;
2667 cmd->src = msm_virt_to_dma(chip,
2668 &dma_buffer->data.nandc11_addr1);
2669 cmd->dst = NC11(MSM_NAND_ADDR1);
2670 cmd->len = 4;
2671 cmd++;
2672
2673 /* Enable CS0 for NC01 */
2674 cmd->cmd = 0;
2675 cmd->src = msm_virt_to_dma(chip,
2676 &dma_buffer->data.chipsel_cs0);
2677 cmd->dst =
2678 NC01(MSM_NAND_FLASH_CHIP_SELECT);
2679 cmd->len = 4;
2680 cmd++;
2681
2682 /* Enable CS1 for NC10 */
2683 cmd->cmd = 0;
2684 cmd->src = msm_virt_to_dma(chip,
2685 &dma_buffer->data.chipsel_cs1);
2686 cmd->dst =
2687 NC10(MSM_NAND_FLASH_CHIP_SELECT);
2688 cmd->len = 4;
2689 cmd++;
2690
2691 /* config DEV0_CFG0 & CFG1 for CS0 */
2692 cmd->cmd = 0;
2693 cmd->src = msm_virt_to_dma(chip,
2694 &dma_buffer->data.cfg0);
2695 cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
2696 cmd->len = 8;
2697 cmd++;
2698
2699 /* config DEV1_CFG0 & CFG1 for CS1 */
2700 cmd->cmd = 0;
2701 cmd->src = msm_virt_to_dma(chip,
2702 &dma_buffer->data.cfg0);
2703 cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
2704 cmd->len = 8;
2705 cmd++;
2706 }
2707
2708 dma_buffer->data.ecccfg = chip->ecc_buf_cfg;
2709 cmd->cmd = 0;
2710 cmd->src = msm_virt_to_dma(chip,
2711 &dma_buffer->data.ecccfg);
2712 cmd->dst = NC11(MSM_NAND_EBI2_ECC_BUF_CFG);
2713 cmd->len = 4;
2714 cmd++;
2715
2716 /* NC01 --> ADDR0 */
2717 cmd->cmd = 0;
2718 cmd->src = msm_virt_to_dma(chip,
2719 &dma_buffer->data.nandc01_addr0);
2720 cmd->dst = NC01(MSM_NAND_ADDR0);
2721 cmd->len = 4;
2722 cmd++;
2723
2724 /* NC10 --> ADDR0 */
2725 cmd->cmd = 0;
2726 cmd->src = msm_virt_to_dma(chip,
2727 &dma_buffer->data.nandc10_addr0);
2728 cmd->dst = NC10(MSM_NAND_ADDR0);
2729 cmd->len = 4;
2730 cmd++;
2731 }
2732
2733 if (n % 2 == 0) {
2734 /* MASK CMD ACK/REQ --> NC10 (0xF14)*/
2735 cmd->cmd = 0;
2736 cmd->src = msm_virt_to_dma(chip,
2737 &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
2738 cmd->dst = EBI2_NAND_ADM_MUX;
2739 cmd->len = 4;
2740 cmd++;
2741
2742 /* CMD */
2743 cmd->cmd = DST_CRCI_NAND_CMD;
2744 cmd->src = msm_virt_to_dma(chip,
2745 &dma_buffer->data.cmd);
2746 cmd->dst = NC01(MSM_NAND_FLASH_CMD);
2747 cmd->len = 4;
2748 cmd++;
2749 } else {
2750 /* MASK CMD ACK/REQ --> NC01 (0x53C)*/
2751 cmd->cmd = 0;
2752 cmd->src = msm_virt_to_dma(chip,
2753 &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
2754 cmd->dst = EBI2_NAND_ADM_MUX;
2755 cmd->len = 4;
2756 cmd++;
2757
2758 /* CMD */
2759 cmd->cmd = DST_CRCI_NAND_CMD;
2760 cmd->src = msm_virt_to_dma(chip,
2761 &dma_buffer->data.cmd);
2762 cmd->dst = NC10(MSM_NAND_FLASH_CMD);
2763 cmd->len = 4;
2764 cmd++;
2765 }
2766
2767 if (ops->mode != MTD_OPS_RAW)
2768 sectordatawritesize = (n < (cwperpage - 1)) ?
2769 516 : (512 - ((cwperpage - 1) << 2));
2770 else
2771 sectordatawritesize = chip->cw_size;
2772
2773 cmd->cmd = 0;
2774 cmd->src = data_dma_addr_curr;
2775 data_dma_addr_curr += sectordatawritesize;
2776
2777 if (n % 2 == 0)
2778 cmd->dst = NC01(MSM_NAND_FLASH_BUFFER);
2779 else
2780 cmd->dst = NC10(MSM_NAND_FLASH_BUFFER);
2781 cmd->len = sectordatawritesize;
2782 cmd++;
2783
2784 if (ops->oobbuf) {
2785 if (n == (cwperpage - 1)) {
2786 cmd->cmd = 0;
2787 cmd->src = oob_dma_addr_curr;
2788 cmd->dst = NC10(MSM_NAND_FLASH_BUFFER) +
2789 (512 - ((cwperpage - 1) << 2));
2790 if ((cwperpage << 2) < oob_len)
2791 cmd->len = (cwperpage << 2);
2792 else
2793 cmd->len = oob_len;
2794 oob_dma_addr_curr += cmd->len;
2795 oob_len -= cmd->len;
2796 if (cmd->len > 0)
2797 cmd++;
2798 }
2799 if (ops->mode != MTD_OPS_AUTO_OOB) {
2800 /* skip ecc bytes in oobbuf */
2801 if (oob_len < chip->ecc_parity_bytes) {
2802 oob_dma_addr_curr +=
2803 chip->ecc_parity_bytes;
2804 oob_len -=
2805 chip->ecc_parity_bytes;
2806 } else {
2807 oob_dma_addr_curr += oob_len;
2808 oob_len = 0;
2809 }
2810 }
2811 }
2812
2813 if (n % 2 == 0) {
2814 if (n != 0) {
2815 /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
2816 cmd->cmd = 0;
2817 cmd->src = msm_virt_to_dma(chip,
2818 &dma_buffer->
2819 data.adm_mux_data_ack_req_nc01);
2820 cmd->dst = EBI2_NAND_ADM_MUX;
2821 cmd->len = 4;
2822 cmd++;
2823
2824 /* block on data ready from NC10, then
2825 * read the status register
2826 */
2827 cmd->cmd = SRC_CRCI_NAND_DATA;
2828 cmd->src = NC10(MSM_NAND_FLASH_STATUS);
2829 cmd->dst = msm_virt_to_dma(chip,
2830 &dma_buffer->data.flash_status[n-1]);
2831 cmd->len = 4;
2832 cmd++;
2833 }
2834 /* kick the NC01 execute register */
2835 cmd->cmd = 0;
2836 cmd->src = msm_virt_to_dma(chip,
2837 &dma_buffer->data.exec);
2838 cmd->dst = NC01(MSM_NAND_EXEC_CMD);
2839 cmd->len = 4;
2840 cmd++;
2841 } else {
2842 /* MASK DATA ACK/REQ --> NC10 (0xF28)*/
2843 cmd->cmd = 0;
2844 cmd->src = msm_virt_to_dma(chip,
2845 &dma_buffer->data.adm_mux_data_ack_req_nc10);
2846 cmd->dst = EBI2_NAND_ADM_MUX;
2847 cmd->len = 4;
2848 cmd++;
2849
2850 /* block on data ready from NC01, then
2851 * read the status register
2852 */
2853 cmd->cmd = SRC_CRCI_NAND_DATA;
2854 cmd->src = NC01(MSM_NAND_FLASH_STATUS);
2855 cmd->dst = msm_virt_to_dma(chip,
2856 &dma_buffer->data.flash_status[n-1]);
2857 cmd->len = 4;
2858 cmd++;
2859
2860 /* kick the execute register */
2861 cmd->cmd = 0;
2862 cmd->src =
2863 msm_virt_to_dma(chip, &dma_buffer->data.exec);
2864 cmd->dst = NC10(MSM_NAND_EXEC_CMD);
2865 cmd->len = 4;
2866 cmd++;
2867 }
2868 }
2869
2870 /* MASK DATA ACK/REQ --> NC01 (0xA3C)*/
2871 cmd->cmd = 0;
2872 cmd->src = msm_virt_to_dma(chip,
2873 &dma_buffer->data.adm_mux_data_ack_req_nc01);
2874 cmd->dst = EBI2_NAND_ADM_MUX;
2875 cmd->len = 4;
2876 cmd++;
2877
2878 /* process the outstanding status request for the last codeword */
2879 /* block on data ready, then
2880 * read the status register
2881 */
2882 cmd->cmd = SRC_CRCI_NAND_DATA;
2883 cmd->src = NC10(MSM_NAND_FLASH_STATUS);
2884 cmd->dst = msm_virt_to_dma(chip,
2885 &dma_buffer->data.flash_status[n-1]);
2886 cmd->len = 4;
2887 cmd++;
2888
2889 cmd->cmd = 0;
2890 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
2891 cmd->dst = NC11(MSM_NAND_FLASH_STATUS);
2892 cmd->len = 4;
2893 cmd++;
2894
2895 cmd->cmd = 0;
2896 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
2897 cmd->dst = NC11(MSM_NAND_READ_STATUS);
2898 cmd->len = 4;
2899 cmd++;
2900
2901 /* MASK DATA ACK/REQ --> NC01 (0xFC0)*/
2902 cmd->cmd = 0;
2903 cmd->src = msm_virt_to_dma(chip,
2904 &dma_buffer->data.adm_default_mux);
2905 cmd->dst = EBI2_NAND_ADM_MUX;
2906 cmd->len = 4;
2907 cmd++;
2908
2909 if (!interleave_enable) {
2910 /* restore the default register values */
2911 cmd->cmd = 0;
2912 cmd->src = msm_virt_to_dma(chip,
2913 &dma_buffer->data.nc01_flash_dev_cmd_vld_default);
2914 cmd->dst = NC01(MSM_NAND_DEV_CMD_VLD);
2915 cmd->len = 4;
2916 cmd++;
2917
2918 cmd->cmd = 0;
2919 cmd->src = msm_virt_to_dma(chip,
2920 &dma_buffer->data.nc10_flash_dev_cmd0_default);
2921 cmd->dst = NC10(MSM_NAND_DEV_CMD0);
2922 cmd->len = 4;
2923 cmd++;
2924 } else {
2925 /* disable CS1 */
2926 cmd->cmd = 0;
2927 cmd->src = msm_virt_to_dma(chip,
2928 &dma_buffer->data.default_ebi2_chip_select_cfg0);
2929 cmd->dst = EBI2_CHIP_SELECT_CFG0;
2930 cmd->len = 4;
2931 cmd++;
2932 }
2933
2934 dma_buffer->cmd[0].cmd |= CMD_OCB;
2935 cmd[-1].cmd |= CMD_OCU | CMD_LC;
2936 BUILD_BUG_ON(16 * 6 + 18 != ARRAY_SIZE(dma_buffer->cmd));
2937 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
2938 dma_buffer->cmdptr =
2939 ((msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP);
2940
2941 mb();
2942 msm_dmov_exec_cmd(chip->dma_channel,
2943 DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(
2944 msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
2945 mb();
2946
2947 /* if any of the writes failed (0x10), or there was a
2948 * protection violation (0x100), or the program success
2949 * bit (0x80) is unset, we lose
2950 */
2951 err = 0;
2952 for (n = 0; n < cwperpage; n++) {
2953 if (dma_buffer->data.flash_status[n] & 0x110) {
2954 err = -EIO;
2955 break;
2956 }
2957 if (!(dma_buffer->data.flash_status[n] & 0x80)) {
2958 err = -EIO;
2959 break;
2960 }
2961 }
2962 /* check for flash status busy for the last codeword */
2963 if (!interleave_enable)
2964 if (!(dma_buffer->data.flash_status[cwperpage - 1]
2965 & 0x20)) {
2966 err = -EIO;
2967 break;
2968 }
2969#if VERBOSE
2970 for (n = 0; n < cwperpage; n++) {
2971 if (n%2) {
2972 pr_info("NC10: write pg %d: flash_status[%d] = %x\n",
2973 page, n, dma_buffer->data.flash_status[n]);
2974 } else {
2975 pr_info("NC01: write pg %d: flash_status[%d] = %x\n",
2976 page, n, dma_buffer->data.flash_status[n]);
2977 }
2978 }
2979#endif
2980 if (err)
2981 break;
2982 pages_written++;
2983 page++;
2984 }
2985 if (ops->mode != MTD_OPS_RAW)
2986 ops->retlen = mtd->writesize * pages_written;
2987 else
2988 ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
2989
2990 ops->oobretlen = ops->ooblen - oob_len;
2991
2992 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
2993
2994 if (ops->oobbuf)
2995 dma_unmap_page(chip->dev, oob_dma_addr,
2996 ops->ooblen, DMA_TO_DEVICE);
2997err_dma_map_oobbuf_failed:
2998 if (ops->datbuf)
2999 dma_unmap_page(chip->dev, data_dma_addr, ops->len,
3000 DMA_TO_DEVICE);
3001 if (err)
3002 pr_err("msm_nand_write_oob_dualnandc %llx %x %x failed %d\n",
3003 to, ops->len, ops->ooblen, err);
3004
3005#if VERBOSE
3006 pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
3007 __func__, err, ops->retlen, ops->oobretlen);
3008
3009 pr_info("==================================================="
3010 "==========\n");
3011#endif
3012 return err;
3013}
3014
3015static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
3016 size_t *retlen, const u_char *buf)
3017{
3018 int ret;
3019 struct mtd_oob_ops ops;
3020 int (*write_oob)(struct mtd_info *, loff_t, struct mtd_oob_ops *);
3021
3022 if (!dual_nand_ctlr_present)
3023 write_oob = msm_nand_write_oob;
3024 else
3025 write_oob = msm_nand_write_oob_dualnandc;
3026
3027 ops.mode = MTD_OPS_PLACE_OOB;
3028 ops.retlen = 0;
3029 ops.ooblen = 0;
3030 ops.oobbuf = NULL;
3031 ret = 0;
3032 *retlen = 0;
3033
3034 if (!virt_addr_valid(buf) &&
3035 ((to | len) & (mtd->writesize - 1)) == 0 &&
3036 ((unsigned long) buf & ~PAGE_MASK) + len > PAGE_SIZE) {
3037 /*
3038 * Handle writing of large size write buffer in vmalloc
3039 * address space that does not fit in an MMU page.
3040 * The destination address must be on page boundary,
3041 * and the size must be multiple of NAND page size.
3042 * Writing partial page is not supported.
3043 */
3044 ops.len = mtd->writesize;
3045
3046 for (;;) {
3047 ops.datbuf = (uint8_t *) buf;
3048
3049 ret = write_oob(mtd, to, &ops);
3050 if (ret < 0)
3051 break;
3052
3053 len -= mtd->writesize;
3054 *retlen += mtd->writesize;
3055 if (len == 0)
3056 break;
3057
3058 buf += mtd->writesize;
3059 to += mtd->writesize;
3060 }
3061 } else {
3062 ops.len = len;
3063 ops.datbuf = (uint8_t *) buf;
3064 ret = write_oob(mtd, to, &ops);
3065 *retlen = ops.retlen;
3066 }
3067
3068 return ret;
3069}
3070
3071static int
3072msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
3073{
3074 int err;
3075 struct msm_nand_chip *chip = mtd->priv;
3076 struct {
3077 dmov_s cmd[6];
3078 unsigned cmdptr;
3079 struct {
3080 uint32_t cmd;
3081 uint32_t addr0;
3082 uint32_t addr1;
3083 uint32_t chipsel;
3084 uint32_t cfg0;
3085 uint32_t cfg1;
3086 uint32_t exec;
3087 uint32_t flash_status;
3088 uint32_t clrfstatus;
3089 uint32_t clrrstatus;
3090 } data;
3091 } *dma_buffer;
3092 dmov_s *cmd;
3093 unsigned page = 0;
3094
3095 if (mtd->writesize == 2048)
3096 page = instr->addr >> 11;
3097
3098 if (mtd->writesize == 4096)
3099 page = instr->addr >> 12;
3100
3101 if (instr->addr & (mtd->erasesize - 1)) {
3102 pr_err("%s: unsupported erase address, 0x%llx\n",
3103 __func__, instr->addr);
3104 return -EINVAL;
3105 }
3106 if (instr->len != mtd->erasesize) {
3107 pr_err("%s: unsupported erase len, %lld\n",
3108 __func__, instr->len);
3109 return -EINVAL;
3110 }
3111
3112 wait_event(chip->wait_queue,
3113 (dma_buffer = msm_nand_get_dma_buffer(
3114 chip, sizeof(*dma_buffer))));
3115
3116 cmd = dma_buffer->cmd;
3117
3118 dma_buffer->data.cmd = MSM_NAND_CMD_BLOCK_ERASE;
3119 dma_buffer->data.addr0 = page;
3120 dma_buffer->data.addr1 = 0;
3121 dma_buffer->data.chipsel = 0 | 4;
3122 dma_buffer->data.exec = 1;
3123 dma_buffer->data.flash_status = 0xeeeeeeee;
3124 dma_buffer->data.cfg0 = chip->CFG0 & (~(7 << 6)); /* CW_PER_PAGE = 0 */
3125 dma_buffer->data.cfg1 = chip->CFG1;
3126 dma_buffer->data.clrfstatus = 0x00000020;
3127 dma_buffer->data.clrrstatus = 0x000000C0;
3128
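/* Six-descriptor chain: send the block-erase command and address,
 * program CFG0/CFG1, set the EXEC bit, block on the data CRCI and read
 * back FLASH_STATUS, then clear the flash and read status registers.
 */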
3129 cmd->cmd = DST_CRCI_NAND_CMD | CMD_OCB;
3130 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
3131 cmd->dst = MSM_NAND_FLASH_CMD;
3132 cmd->len = 16;
3133 cmd++;
3134
3135 cmd->cmd = 0;
3136 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
3137 cmd->dst = MSM_NAND_DEV0_CFG0;
3138 cmd->len = 8;
3139 cmd++;
3140
3141 cmd->cmd = 0;
3142 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
3143 cmd->dst = MSM_NAND_EXEC_CMD;
3144 cmd->len = 4;
3145 cmd++;
3146
3147 cmd->cmd = SRC_CRCI_NAND_DATA;
3148 cmd->src = MSM_NAND_FLASH_STATUS;
3149 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status);
3150 cmd->len = 4;
3151 cmd++;
3152
3153 cmd->cmd = 0;
3154 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
3155 cmd->dst = MSM_NAND_FLASH_STATUS;
3156 cmd->len = 4;
3157 cmd++;
3158
3159 cmd->cmd = CMD_OCU | CMD_LC;
3160 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
3161 cmd->dst = MSM_NAND_READ_STATUS;
3162 cmd->len = 4;
3163 cmd++;
3164
3165 BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd) - 1);
3166 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
3167 dma_buffer->cmdptr =
3168 (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;
3169
3170 mb();
3171 msm_dmov_exec_cmd(
3172 chip->dma_channel, DMOV_CMD_PTR_LIST |
3173 DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
3174 mb();
3175
3176 /* we fail if there was an operation error, an MPU error, or the
3177 * erase success bit was not set.
3178 */
3179
3180 if (dma_buffer->data.flash_status & 0x110 ||
3181 !(dma_buffer->data.flash_status & 0x80))
3182 err = -EIO;
3183 else
3184 err = 0;
3185
3186 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
3187 if (err) {
3188 pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr);
3189 instr->fail_addr = instr->addr;
3190 instr->state = MTD_ERASE_FAILED;
3191 } else {
3192 instr->state = MTD_ERASE_DONE;
3193 instr->fail_addr = 0xffffffff;
3194 mtd_erase_callback(instr);
3195 }
3196 return err;
3197}
3198
3199static int
3200msm_nand_erase_dualnandc(struct mtd_info *mtd, struct erase_info *instr)
3201{
3202 int err;
3203 struct msm_nand_chip *chip = mtd->priv;
3204 struct {
3205 dmov_s cmd[18];
3206 unsigned cmdptr;
3207 struct {
3208 uint32_t cmd;
3209 uint32_t addr0;
3210 uint32_t addr1;
3211 uint32_t chipsel_cs0;
3212 uint32_t chipsel_cs1;
3213 uint32_t cfg0;
3214 uint32_t cfg1;
3215 uint32_t exec;
3216 uint32_t ecccfg;
3217 uint32_t ebi2_chip_select_cfg0;
3218 uint32_t adm_mux_data_ack_req_nc01;
3219 uint32_t adm_mux_cmd_ack_req_nc01;
3220 uint32_t adm_mux_data_ack_req_nc10;
3221 uint32_t adm_mux_cmd_ack_req_nc10;
3222 uint32_t adm_default_mux;
3223 uint32_t default_ebi2_chip_select_cfg0;
3224 uint32_t nc01_flash_dev_cmd0;
3225 uint32_t nc01_flash_dev_cmd0_default;
3226 uint32_t flash_status[2];
3227 uint32_t clrfstatus;
3228 uint32_t clrrstatus;
3229 } data;
3230 } *dma_buffer;
3231 dmov_s *cmd;
3232 unsigned page = 0;
3233
3234 if (mtd->writesize == 2048)
3235 page = instr->addr >> 11;
3236
3237 if (mtd->writesize == 4096)
3238 page = instr->addr >> 12;
3239
3240 if (mtd->writesize == 8192)
3241 page = (instr->addr >> 1) >> 12;
3242
3243 if (instr->addr & (mtd->erasesize - 1)) {
3244 pr_err("%s: unsupported erase address, 0x%llx\n",
3245 __func__, instr->addr);
3246 return -EINVAL;
3247 }
3248 if (instr->len != mtd->erasesize) {
3249 pr_err("%s: unsupported erase len, %lld\n",
3250 __func__, instr->len);
3251 return -EINVAL;
3252 }
3253
3254 wait_event(chip->wait_queue,
3255 (dma_buffer = msm_nand_get_dma_buffer(
3256 chip, sizeof(*dma_buffer))));
3257
3258 cmd = dma_buffer->cmd;
3259
3260 dma_buffer->data.cmd = MSM_NAND_CMD_BLOCK_ERASE;
3261 dma_buffer->data.addr0 = page;
3262 dma_buffer->data.addr1 = 0;
3263 dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
3264 dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
3265 dma_buffer->data.exec = 1;
3266 dma_buffer->data.flash_status[0] = 0xeeeeeeee;
3267 dma_buffer->data.flash_status[1] = 0xeeeeeeee;
3268 dma_buffer->data.cfg0 = chip->CFG0 & (~(7 << 6)); /* CW_PER_PAGE = 0 */
3269 dma_buffer->data.cfg1 = chip->CFG1;
3270 dma_buffer->data.clrfstatus = 0x00000020;
3271 dma_buffer->data.clrrstatus = 0x000000C0;
3272
3273 dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
3274 dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
3275 dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
3276 dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
3277 dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
3278 dma_buffer->data.adm_default_mux = 0x00000FC0;
3279 dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
3280
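/* Erase the block on both chip selects back to back: the CS0 half via
 * NC01 and the CS1 half via NC10, switching the ADM mux between them.
 * Both returned status words must indicate success.
 */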
3281 /* enable CS1 */
3282 cmd->cmd = 0 | CMD_OCB;
3283 cmd->src = msm_virt_to_dma(chip,
3284 &dma_buffer->data.ebi2_chip_select_cfg0);
3285 cmd->dst = EBI2_CHIP_SELECT_CFG0;
3286 cmd->len = 4;
3287 cmd++;
3288
3289 /* erase CS0 block now !!! */
3290 /* 0xF14 */
3291 cmd->cmd = 0;
3292 cmd->src = msm_virt_to_dma(chip,
3293 &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
3294 cmd->dst = EBI2_NAND_ADM_MUX;
3295 cmd->len = 4;
3296 cmd++;
3297
3298 cmd->cmd = DST_CRCI_NAND_CMD;
3299 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
3300 cmd->dst = NC01(MSM_NAND_FLASH_CMD);
3301 cmd->len = 16;
3302 cmd++;
3303
3304 cmd->cmd = 0;
3305 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
3306 cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
3307 cmd->len = 8;
3308 cmd++;
3309
3310 cmd->cmd = 0;
3311 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
3312 cmd->dst = NC01(MSM_NAND_EXEC_CMD);
3313 cmd->len = 4;
3314 cmd++;
3315
3316 /* 0xF28 */
3317 cmd->cmd = 0;
3318 cmd->src = msm_virt_to_dma(chip,
3319 &dma_buffer->data.adm_mux_data_ack_req_nc10);
3320 cmd->dst = EBI2_NAND_ADM_MUX;
3321 cmd->len = 4;
3322 cmd++;
3323
3324 cmd->cmd = SRC_CRCI_NAND_DATA;
3325 cmd->src = NC01(MSM_NAND_FLASH_STATUS);
3326 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status[0]);
3327 cmd->len = 4;
3328 cmd++;
3329
3330 /* erase CS1 block now !!! */
3331 /* 0x53C */
3332 cmd->cmd = 0;
3333 cmd->src = msm_virt_to_dma(chip,
3334 &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
3335 cmd->dst = EBI2_NAND_ADM_MUX;
3336 cmd->len = 4;
3337 cmd++;
3338
3339 cmd->cmd = DST_CRCI_NAND_CMD;
3340 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
3341 cmd->dst = NC10(MSM_NAND_FLASH_CMD);
3342 cmd->len = 12;
3343 cmd++;
3344
3345 cmd->cmd = 0;
3346 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chipsel_cs1);
3347 cmd->dst = NC10(MSM_NAND_FLASH_CHIP_SELECT);
3348 cmd->len = 4;
3349 cmd++;
3350
3351 cmd->cmd = 0;
3352 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
3353 cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
3354 cmd->len = 8;
cmd++;
3355
3356 cmd->cmd = 0;
3357 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
3358 cmd->dst = NC10(MSM_NAND_EXEC_CMD);
3359 cmd->len = 4;
3360 cmd++;
3361
3362 /* 0xA3C */
3363 cmd->cmd = 0;
3364 cmd->src = msm_virt_to_dma(chip,
3365 &dma_buffer->data.adm_mux_data_ack_req_nc01);
3366 cmd->dst = EBI2_NAND_ADM_MUX;
3367 cmd->len = 4;
3368 cmd++;
3369
3370 cmd->cmd = SRC_CRCI_NAND_DATA;
3371 cmd->src = NC10(MSM_NAND_FLASH_STATUS);
3372 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.flash_status[1]);
3373 cmd->len = 4;
3374 cmd++;
3375
3376 cmd->cmd = 0;
3377 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrfstatus);
3378 cmd->dst = NC11(MSM_NAND_FLASH_STATUS);
3379 cmd->len = 4;
3380 cmd++;
3381
3382 cmd->cmd = 0;
3383 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.clrrstatus);
3384 cmd->dst = NC11(MSM_NAND_READ_STATUS);
3385 cmd->len = 4;
3386 cmd++;
3387
3388 cmd->cmd = 0;
3389 cmd->src = msm_virt_to_dma(chip,
3390 &dma_buffer->data.adm_default_mux);
3391 cmd->dst = EBI2_NAND_ADM_MUX;
3392 cmd->len = 4;
3393 cmd++;
3394
3395 /* disable CS1 */
3396 cmd->cmd = CMD_OCU | CMD_LC;
3397 cmd->src = msm_virt_to_dma(chip,
3398 &dma_buffer->data.default_ebi2_chip_select_cfg0);
3399 cmd->dst = EBI2_CHIP_SELECT_CFG0;
3400 cmd->len = 4;
3401 cmd++;
3402
3403 BUILD_BUG_ON(17 != ARRAY_SIZE(dma_buffer->cmd) - 1);
3404 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
3405
3406 dma_buffer->cmdptr =
3407 (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3) | CMD_PTR_LP;
3408
3409 mb();
3410 msm_dmov_exec_cmd(
3411 chip->dma_channel, DMOV_CMD_PTR_LIST |
3412 DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
3413 mb();
3414
3415 /* we fail if there was an operation error, an MPU error, or the
3416 * erase success bit was not set.
3417 */
3418
3419 if (dma_buffer->data.flash_status[0] & 0x110 ||
3420 !(dma_buffer->data.flash_status[0] & 0x80) ||
3421 dma_buffer->data.flash_status[1] & 0x110 ||
3422 !(dma_buffer->data.flash_status[1] & 0x80))
3423 err = -EIO;
3424 else
3425 err = 0;
3426
3427 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
3428 if (err) {
3429 pr_err("%s: erase failed, 0x%llx\n", __func__, instr->addr);
3430 instr->fail_addr = instr->addr;
3431 instr->state = MTD_ERASE_FAILED;
3432 } else {
3433 instr->state = MTD_ERASE_DONE;
3434 instr->fail_addr = 0xffffffff;
3435 mtd_erase_callback(instr);
3436 }
3437 return err;
3438}
3439
3440static int
3441msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
3442{
3443 struct msm_nand_chip *chip = mtd->priv;
3444 int ret;
3445 struct {
3446 dmov_s cmd[5];
3447 unsigned cmdptr;
3448 struct {
3449 uint32_t cmd;
3450 uint32_t addr0;
3451 uint32_t addr1;
3452 uint32_t chipsel;
3453 uint32_t cfg0;
3454 uint32_t cfg1;
3455 uint32_t eccbchcfg;
3456 uint32_t exec;
3457 uint32_t ecccfg;
3458 struct {
3459 uint32_t flash_status;
3460 uint32_t buffer_status;
3461 } result;
3462 } data;
3463 } *dma_buffer;
3464 dmov_s *cmd;
3465 uint8_t *buf;
3466 unsigned page = 0;
3467 unsigned cwperpage;
3468
3469 if (mtd->writesize == 2048)
3470 page = ofs >> 11;
3471
3472 if (mtd->writesize == 4096)
3473 page = ofs >> 12;
3474
3475 cwperpage = (mtd->writesize >> 9);
3476
3477 /* Check for invalid offset */
3478 if (ofs > mtd->size)
3479 return -EINVAL;
3480 if (ofs & (mtd->erasesize - 1)) {
3481 pr_err("%s: unsupported block address, 0x%x\n",
3482 __func__, (uint32_t)ofs);
3483 return -EINVAL;
3484 }
3485
3486 wait_event(chip->wait_queue,
3487 (dma_buffer = msm_nand_get_dma_buffer(chip ,
3488 sizeof(*dma_buffer) + 4)));
3489 buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
3490
3491 /* Read 4 bytes starting from the bad block marker location
3492 * in the last code word of the page
3493 */
3494
3495 cmd = dma_buffer->cmd;
3496
3497 dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
3498 dma_buffer->data.cfg0 = chip->CFG0_RAW & ~(7U << 6);
3499 dma_buffer->data.cfg1 = chip->CFG1_RAW |
3500 (chip->CFG1 & CFG1_WIDE_FLASH);
3501 if (enable_bch_ecc)
3502 dma_buffer->data.eccbchcfg = chip->ecc_bch_cfg;
3503
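/* ADDR0 points at the spare area of the last codeword, where the
 * factory bad block marker is stored; for wide (16-bit) flash the
 * column offset is halved.
 */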
3504 if (chip->CFG1 & CFG1_WIDE_FLASH)
3505 dma_buffer->data.addr0 = (page << 16) |
3506 ((chip->cw_size * (cwperpage-1)) >> 1);
3507 else
3508 dma_buffer->data.addr0 = (page << 16) |
3509 (chip->cw_size * (cwperpage-1));
3510
3511 dma_buffer->data.addr1 = (page >> 16) & 0xff;
3512 dma_buffer->data.chipsel = 0 | 4;
3513
3514 dma_buffer->data.exec = 1;
3515
3516 dma_buffer->data.result.flash_status = 0xeeeeeeee;
3517 dma_buffer->data.result.buffer_status = 0xeeeeeeee;
3518
3519 cmd->cmd = DST_CRCI_NAND_CMD;
3520 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
3521 cmd->dst = MSM_NAND_FLASH_CMD;
3522 cmd->len = 16;
3523 cmd++;
3524
3525 cmd->cmd = 0;
3526 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
3527 cmd->dst = MSM_NAND_DEV0_CFG0;
3528 if (enable_bch_ecc)
3529 cmd->len = 12;
3530 else
3531 cmd->len = 8;
3532 cmd++;
3533
3534 cmd->cmd = 0;
3535 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
3536 cmd->dst = MSM_NAND_EXEC_CMD;
3537 cmd->len = 4;
3538 cmd++;
3539
3540 cmd->cmd = SRC_CRCI_NAND_DATA;
3541 cmd->src = MSM_NAND_FLASH_STATUS;
3542 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result);
3543 cmd->len = 8;
3544 cmd++;
3545
3546 cmd->cmd = 0;
3547 cmd->src = MSM_NAND_FLASH_BUFFER +
3548 (mtd->writesize - (chip->cw_size * (cwperpage-1)));
3549 cmd->dst = msm_virt_to_dma(chip, buf);
3550 cmd->len = 4;
3551 cmd++;
3552
3553 BUILD_BUG_ON(5 != ARRAY_SIZE(dma_buffer->cmd));
3554 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
3555 dma_buffer->cmd[0].cmd |= CMD_OCB;
3556 cmd[-1].cmd |= CMD_OCU | CMD_LC;
3557
3558 dma_buffer->cmdptr = (msm_virt_to_dma(chip,
3559 dma_buffer->cmd) >> 3) | CMD_PTR_LP;
3560
3561 mb();
3562 msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST |
3563 DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
3564 mb();
3565
3566 ret = 0;
3567 if (dma_buffer->data.result.flash_status & 0x110)
3568 ret = -EIO;
3569
3570 if (!ret) {
3571 /* Check for bad block marker byte */
3572 if (chip->CFG1 & CFG1_WIDE_FLASH) {
3573 if (buf[0] != 0xFF || buf[1] != 0xFF)
3574 ret = 1;
3575 } else {
3576 if (buf[0] != 0xFF)
3577 ret = 1;
3578 }
3579 }
3580
3581 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
3582 return ret;
3583}
3584
3585static int
3586msm_nand_block_isbad_dualnandc(struct mtd_info *mtd, loff_t ofs)
3587{
3588 struct msm_nand_chip *chip = mtd->priv;
3589 int ret;
3590 struct {
3591 dmov_s cmd[18];
3592 unsigned cmdptr;
3593 struct {
3594 uint32_t cmd;
3595 uint32_t addr0;
3596 uint32_t addr1;
3597 uint32_t chipsel_cs0;
3598 uint32_t chipsel_cs1;
3599 uint32_t cfg0;
3600 uint32_t cfg1;
3601 uint32_t exec;
3602 uint32_t ecccfg;
3603 uint32_t ebi2_chip_select_cfg0;
3604 uint32_t adm_mux_data_ack_req_nc01;
3605 uint32_t adm_mux_cmd_ack_req_nc01;
3606 uint32_t adm_mux_data_ack_req_nc10;
3607 uint32_t adm_mux_cmd_ack_req_nc10;
3608 uint32_t adm_default_mux;
3609 uint32_t default_ebi2_chip_select_cfg0;
3610 struct {
3611 uint32_t flash_status;
3612 uint32_t buffer_status;
3613 } result[2];
3614 } data;
3615 } *dma_buffer;
3616 dmov_s *cmd;
3617 uint8_t *buf01;
3618 uint8_t *buf10;
3619 unsigned page = 0;
3620 unsigned cwperpage;
3621
3622 if (mtd->writesize == 2048)
3623 page = ofs >> 11;
3624
3625 if (mtd->writesize == 4096)
3626 page = ofs >> 12;
3627
3628 if (mtd->writesize == 8192)
3629 page = (ofs >> 1) >> 12;
3630
3631 cwperpage = ((mtd->writesize >> 1) >> 9);
3632
3633 /* Check for invalid offset */
3634 if (ofs > mtd->size)
3635 return -EINVAL;
3636 if (ofs & (mtd->erasesize - 1)) {
3637 pr_err("%s: unsupported block address, 0x%x\n",
3638 __func__, (uint32_t)ofs);
3639 return -EINVAL;
3640 }
3641
3642 wait_event(chip->wait_queue,
3643 (dma_buffer = msm_nand_get_dma_buffer(chip ,
3644 sizeof(*dma_buffer) + 8)));
3645 buf01 = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
3646 buf10 = buf01 + 4;
3647
3648 /* Read 4 bytes starting from the bad block marker location
3649 * in the last code word of the page
3650 */
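/* Both controllers are probed: the last codeword of the NC01 half and
 * of the NC10 half are read raw, and the block is reported bad if
 * either marker (one byte, or two bytes on 16-bit flash) is not 0xFF.
 */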
3651 cmd = dma_buffer->cmd;
3652
3653 dma_buffer->data.cmd = MSM_NAND_CMD_PAGE_READ;
3654 dma_buffer->data.cfg0 = chip->CFG0_RAW & ~(7U << 6);
3655 dma_buffer->data.cfg1 = chip->CFG1_RAW |
3656 (chip->CFG1 & CFG1_WIDE_FLASH);
3657
3658 if (chip->CFG1 & CFG1_WIDE_FLASH)
3659 dma_buffer->data.addr0 = (page << 16) |
3660 ((528*(cwperpage-1)) >> 1);
3661 else
3662 dma_buffer->data.addr0 = (page << 16) |
3663 (528*(cwperpage-1));
3664
3665 dma_buffer->data.addr1 = (page >> 16) & 0xff;
3666 dma_buffer->data.chipsel_cs0 = (1<<4) | 4;
3667 dma_buffer->data.chipsel_cs1 = (1<<4) | 5;
3668
3669 dma_buffer->data.exec = 1;
3670
3671 dma_buffer->data.result[0].flash_status = 0xeeeeeeee;
3672 dma_buffer->data.result[0].buffer_status = 0xeeeeeeee;
3673 dma_buffer->data.result[1].flash_status = 0xeeeeeeee;
3674 dma_buffer->data.result[1].buffer_status = 0xeeeeeeee;
3675
3676 dma_buffer->data.ebi2_chip_select_cfg0 = 0x00000805;
3677 dma_buffer->data.adm_mux_data_ack_req_nc01 = 0x00000A3C;
3678 dma_buffer->data.adm_mux_cmd_ack_req_nc01 = 0x0000053C;
3679 dma_buffer->data.adm_mux_data_ack_req_nc10 = 0x00000F28;
3680 dma_buffer->data.adm_mux_cmd_ack_req_nc10 = 0x00000F14;
3681 dma_buffer->data.adm_default_mux = 0x00000FC0;
3682 dma_buffer->data.default_ebi2_chip_select_cfg0 = 0x00000801;
3683
3684 /* Reading last code word from NC01 */
3685 /* enable CS1 */
3686 cmd->cmd = 0;
3687 cmd->src = msm_virt_to_dma(chip,
3688 &dma_buffer->data.ebi2_chip_select_cfg0);
3689 cmd->dst = EBI2_CHIP_SELECT_CFG0;
3690 cmd->len = 4;
3691 cmd++;
3692
3693 /* 0xF14 */
3694 cmd->cmd = 0;
3695 cmd->src = msm_virt_to_dma(chip,
3696 &dma_buffer->data.adm_mux_cmd_ack_req_nc10);
3697 cmd->dst = EBI2_NAND_ADM_MUX;
3698 cmd->len = 4;
3699 cmd++;
3700
3701 cmd->cmd = DST_CRCI_NAND_CMD;
3702 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
3703 cmd->dst = NC01(MSM_NAND_FLASH_CMD);
3704 cmd->len = 16;
3705 cmd++;
3706
3707 cmd->cmd = 0;
3708 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
3709 cmd->dst = NC01(MSM_NAND_DEV0_CFG0);
3710 cmd->len = 8;
3711 cmd++;
3712
3713 cmd->cmd = 0;
3714 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
3715 cmd->dst = NC01(MSM_NAND_EXEC_CMD);
3716 cmd->len = 4;
3717 cmd++;
3718
3719 /* 0xF28 */
3720 cmd->cmd = 0;
3721 cmd->src = msm_virt_to_dma(chip,
3722 &dma_buffer->data.adm_mux_data_ack_req_nc10);
3723 cmd->dst = EBI2_NAND_ADM_MUX;
3724 cmd->len = 4;
3725 cmd++;
3726
3727 cmd->cmd = SRC_CRCI_NAND_DATA;
3728 cmd->src = NC01(MSM_NAND_FLASH_STATUS);
3729 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result[0]);
3730 cmd->len = 8;
3731 cmd++;
3732
3733 cmd->cmd = 0;
3734 cmd->src = NC01(MSM_NAND_FLASH_BUFFER) + ((mtd->writesize >> 1) -
3735 (528*(cwperpage-1)));
3736 cmd->dst = msm_virt_to_dma(chip, buf01);
3737 cmd->len = 4;
3738 cmd++;
3739
3740 /* Reading last code word from NC10 */
3741 /* 0x53C */
3742 cmd->cmd = 0;
3743 cmd->src = msm_virt_to_dma(chip,
3744 &dma_buffer->data.adm_mux_cmd_ack_req_nc01);
3745 cmd->dst = EBI2_NAND_ADM_MUX;
3746 cmd->len = 4;
3747 cmd++;
3748
3749 cmd->cmd = DST_CRCI_NAND_CMD;
3750 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
3751 cmd->dst = NC10(MSM_NAND_FLASH_CMD);
3752 cmd->len = 12;
3753 cmd++;
3754
3755 cmd->cmd = 0;
3756 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.chipsel_cs1);
3757 cmd->dst = NC10(MSM_NAND_FLASH_CHIP_SELECT);
3758 cmd->len = 4;
3759 cmd++;
3760
3761 cmd->cmd = 0;
3762 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cfg0);
3763 cmd->dst = NC10(MSM_NAND_DEV1_CFG0);
3764 cmd->len = 8;
3765 cmd++;
3766
3767 cmd->cmd = 0;
3768 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
3769 cmd->dst = NC10(MSM_NAND_EXEC_CMD);
3770 cmd->len = 4;
3771 cmd++;
3772
3773 /* A3C */
3774 cmd->cmd = 0;
3775 cmd->src = msm_virt_to_dma(chip,
3776 &dma_buffer->data.adm_mux_data_ack_req_nc01);
3777 cmd->dst = EBI2_NAND_ADM_MUX;
3778 cmd->len = 4;
3779 cmd++;
3780
3781 cmd->cmd = SRC_CRCI_NAND_DATA;
3782 cmd->src = NC10(MSM_NAND_FLASH_STATUS);
3783 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.result[1]);
3784 cmd->len = 8;
3785 cmd++;
3786
3787 cmd->cmd = 0;
3788 cmd->src = NC10(MSM_NAND_FLASH_BUFFER) + ((mtd->writesize >> 1) -
3789 (528*(cwperpage-1)));
3790 cmd->dst = msm_virt_to_dma(chip, buf10);
3791 cmd->len = 4;
3792 cmd++;
3793
3794 /* FC0 */
3795 cmd->cmd = 0;
3796 cmd->src = msm_virt_to_dma(chip,
3797 &dma_buffer->data.adm_default_mux);
3798 cmd->dst = EBI2_NAND_ADM_MUX;
3799 cmd->len = 4;
3800 cmd++;
3801
3802 /* disable CS1 */
3803 cmd->cmd = 0;
3804 cmd->src = msm_virt_to_dma(chip,
3805 &dma_buffer->data.ebi2_chip_select_cfg0);
3806 cmd->dst = EBI2_CHIP_SELECT_CFG0;
3807 cmd->len = 4;
3808 cmd++;
3809
3810 BUILD_BUG_ON(18 != ARRAY_SIZE(dma_buffer->cmd));
3811 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
3812 dma_buffer->cmd[0].cmd |= CMD_OCB;
3813 cmd[-1].cmd |= CMD_OCU | CMD_LC;
3814
3815 dma_buffer->cmdptr = (msm_virt_to_dma(chip,
3816 dma_buffer->cmd) >> 3) | CMD_PTR_LP;
3817
3818 mb();
3819	msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST |
3820		DMOV_CMD_ADDR(msm_virt_to_dma(chip, &dma_buffer->cmdptr)));
3821 mb();
3822
3823 ret = 0;
3824 if ((dma_buffer->data.result[0].flash_status & 0x110) ||
3825 (dma_buffer->data.result[1].flash_status & 0x110))
3826 ret = -EIO;
3827
3828 if (!ret) {
3829 /* Check for bad block marker byte for NC01 & NC10 */
3830 if (chip->CFG1 & CFG1_WIDE_FLASH) {
3831 if ((buf01[0] != 0xFF || buf01[1] != 0xFF) ||
3832 (buf10[0] != 0xFF || buf10[1] != 0xFF))
3833 ret = 1;
3834 } else {
3835 if (buf01[0] != 0xFF || buf10[0] != 0xFF)
3836 ret = 1;
3837 }
3838 }
3839
3840 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 8);
3841 return ret;
3842}
3843
3844static int
3845msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3846{
3847 struct mtd_oob_ops ops;
3848 int ret;
3849 uint8_t *buf;
3850
3851 /* Check for invalid offset */
3852 if (ofs > mtd->size)
3853 return -EINVAL;
3854 if (ofs & (mtd->erasesize - 1)) {
3855 pr_err("%s: unsupported block address, 0x%x\n",
3856 __func__, (uint32_t)ofs);
3857 return -EINVAL;
3858 }
3859
3860	/*
3861	 * Write all 0s to the first page.
3862	 * This will set the BB marker to 0.
3863	 */
3864 buf = page_address(ZERO_PAGE());
3865
3866	ops.mode = MTD_OPS_RAW;
3867	ops.len = mtd->writesize + mtd->oobsize;
3868 ops.retlen = 0;
3869 ops.ooblen = 0;
3870 ops.datbuf = buf;
3871 ops.oobbuf = NULL;
3872 if (!interleave_enable)
3873 ret = msm_nand_write_oob(mtd, ofs, &ops);
3874 else
3875 ret = msm_nand_write_oob_dualnandc(mtd, ofs, &ops);
3876
3877 return ret;
3878}
3879
3880/**
3881 * msm_nand_suspend - [MTD Interface] Suspend the msm_nand flash
3882 * @param mtd MTD device structure
3883 */
3884static int msm_nand_suspend(struct mtd_info *mtd)
3885{
3886 return 0;
3887}
3888
3889/**
3890 * msm_nand_resume - [MTD Interface] Resume the msm_nand flash
3891 * @param mtd MTD device structure
3892 */
3893static void msm_nand_resume(struct mtd_info *mtd)
3894{
3895}
3896
3897struct onenand_information {
3898 uint16_t manufacturer_id;
3899 uint16_t device_id;
3900 uint16_t version_id;
3901 uint16_t data_buf_size;
3902 uint16_t boot_buf_size;
3903 uint16_t num_of_buffers;
3904 uint16_t technology;
3905};
3906
3907static struct onenand_information onenand_info;
3908static uint32_t nand_sfcmd_mode;
3909
3910uint32_t flash_onenand_probe(struct msm_nand_chip *chip)
3911{
3912 struct {
3913 dmov_s cmd[7];
3914 unsigned cmdptr;
3915 struct {
3916 uint32_t bcfg;
3917 uint32_t cmd;
3918 uint32_t exec;
3919 uint32_t status;
3920 uint32_t addr0;
3921 uint32_t addr1;
3922 uint32_t addr2;
3923 uint32_t addr3;
3924 uint32_t addr4;
3925 uint32_t addr5;
3926 uint32_t addr6;
3927 uint32_t data0;
3928 uint32_t data1;
3929 uint32_t data2;
3930 uint32_t data3;
3931 uint32_t data4;
3932 uint32_t data5;
3933 uint32_t data6;
3934 } data;
3935 } *dma_buffer;
3936 dmov_s *cmd;
3937
3938 int err = 0;
3939 uint32_t initialsflashcmd = 0;
3940
3941 initialsflashcmd = flash_rd_reg(chip, MSM_NAND_SFLASHC_CMD);
3942
3943 if ((initialsflashcmd & 0x10) == 0x10)
3944 nand_sfcmd_mode = MSM_NAND_SFCMD_ASYNC;
3945 else
3946 nand_sfcmd_mode = MSM_NAND_SFCMD_BURST;
3947
3948	printk(KERN_INFO "SFLASHC Async Mode bit: %x\n", nand_sfcmd_mode);
3949
3950 wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
3951 (chip, sizeof(*dma_buffer))));
3952
3953 cmd = dma_buffer->cmd;
3954
3955 dma_buffer->data.bcfg = SFLASH_BCFG |
3956 (nand_sfcmd_mode ? 0 : (1 << 24));
3957 dma_buffer->data.cmd = SFLASH_PREPCMD(7, 0, 0,
3958 MSM_NAND_SFCMD_DATXS,
3959 nand_sfcmd_mode,
3960 MSM_NAND_SFCMD_REGRD);
3961 dma_buffer->data.exec = 1;
3962 dma_buffer->data.status = CLEAN_DATA_32;
3963 dma_buffer->data.addr0 = (ONENAND_DEVICE_ID << 16) |
3964 (ONENAND_MANUFACTURER_ID);
3965 dma_buffer->data.addr1 = (ONENAND_DATA_BUFFER_SIZE << 16) |
3966 (ONENAND_VERSION_ID);
3967 dma_buffer->data.addr2 = (ONENAND_AMOUNT_OF_BUFFERS << 16) |
3968 (ONENAND_BOOT_BUFFER_SIZE);
3969 dma_buffer->data.addr3 = (CLEAN_DATA_16 << 16) |
3970 (ONENAND_TECHNOLOGY << 0);
3971 dma_buffer->data.data0 = CLEAN_DATA_32;
3972 dma_buffer->data.data1 = CLEAN_DATA_32;
3973 dma_buffer->data.data2 = CLEAN_DATA_32;
3974 dma_buffer->data.data3 = CLEAN_DATA_32;
3975
3976 /* Enable and configure the SFlash controller */
3977 cmd->cmd = 0;
3978 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.bcfg);
3979 cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
3980 cmd->len = 4;
3981 cmd++;
3982
3983 /* Block on cmd ready and write CMD register */
3984 cmd->cmd = DST_CRCI_NAND_CMD;
3985 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.cmd);
3986 cmd->dst = MSM_NAND_SFLASHC_CMD;
3987 cmd->len = 4;
3988 cmd++;
3989
3990 /* Configure the ADDR0 and ADDR1 registers */
3991 cmd->cmd = 0;
3992 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
3993 cmd->dst = MSM_NAND_ADDR0;
3994 cmd->len = 8;
3995 cmd++;
3996
3997 /* Configure the ADDR2 and ADDR3 registers */
3998 cmd->cmd = 0;
3999 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
4000 cmd->dst = MSM_NAND_ADDR2;
4001 cmd->len = 8;
4002 cmd++;
4003
4004 /* Kick the execute command */
4005 cmd->cmd = 0;
4006 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.exec);
4007 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
4008 cmd->len = 4;
4009 cmd++;
4010
4011 /* Block on data ready, and read the two status registers */
4012 cmd->cmd = SRC_CRCI_NAND_DATA;
4013 cmd->src = MSM_NAND_SFLASHC_STATUS;
4014 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.status);
4015 cmd->len = 4;
4016 cmd++;
4017
4018 /* Read data registers - valid only if status says success */
4019 cmd->cmd = 0;
4020 cmd->src = MSM_NAND_GENP_REG0;
4021 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data0);
4022 cmd->len = 16;
4023 cmd++;
4024
4025 BUILD_BUG_ON(7 != ARRAY_SIZE(dma_buffer->cmd));
4026 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
4027 dma_buffer->cmd[0].cmd |= CMD_OCB;
4028 cmd[-1].cmd |= CMD_OCU | CMD_LC;
4029
4030 dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
4031 >> 3) | CMD_PTR_LP;
4032
4033 mb();
4034	msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST
4035		| DMOV_CMD_ADDR(msm_virt_to_dma(chip,
4036 &dma_buffer->cmdptr)));
4037 mb();
4038
4039 /* Check for errors, protection violations etc */
4040 if (dma_buffer->data.status & 0x110) {
4041		pr_info("%s: MPU/OP error"
4042			" (0x%x) during Onenand probe\n",
4043 __func__, dma_buffer->data.status);
4044 err = -EIO;
4045 } else {
4046
4047 onenand_info.manufacturer_id =
4048 (dma_buffer->data.data0 >> 0) & 0x0000FFFF;
4049 onenand_info.device_id =
4050 (dma_buffer->data.data0 >> 16) & 0x0000FFFF;
4051 onenand_info.version_id =
4052 (dma_buffer->data.data1 >> 0) & 0x0000FFFF;
4053 onenand_info.data_buf_size =
4054 (dma_buffer->data.data1 >> 16) & 0x0000FFFF;
4055 onenand_info.boot_buf_size =
4056 (dma_buffer->data.data2 >> 0) & 0x0000FFFF;
4057 onenand_info.num_of_buffers =
4058 (dma_buffer->data.data2 >> 16) & 0x0000FFFF;
4059 onenand_info.technology =
4060 (dma_buffer->data.data3 >> 0) & 0x0000FFFF;
4061
4062
4063 pr_info("======================================="
4064 "==========================\n");
4065
4066 pr_info("%s: manufacturer_id = 0x%x\n"
4067 , __func__, onenand_info.manufacturer_id);
4068 pr_info("%s: device_id = 0x%x\n"
4069 , __func__, onenand_info.device_id);
4070 pr_info("%s: version_id = 0x%x\n"
4071 , __func__, onenand_info.version_id);
4072 pr_info("%s: data_buf_size = 0x%x\n"
4073 , __func__, onenand_info.data_buf_size);
4074 pr_info("%s: boot_buf_size = 0x%x\n"
4075 , __func__, onenand_info.boot_buf_size);
4076 pr_info("%s: num_of_buffers = 0x%x\n"
4077 , __func__, onenand_info.num_of_buffers);
4078 pr_info("%s: technology = 0x%x\n"
4079 , __func__, onenand_info.technology);
4080
4081 pr_info("======================================="
4082 "==========================\n");
4083
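	/*
	 * Accept only the geometry this driver expects: Samsung manufacturer
	 * ID (0xEC), the expected device-ID bit, 2KB data buffer, 512B boot
	 * buffer, two data buffers plus one boot buffer, and technology
	 * field 0.
	 */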
4084 if ((onenand_info.manufacturer_id != 0x00EC)
4085 || ((onenand_info.device_id & 0x0040) != 0x0040)
4086 || (onenand_info.data_buf_size != 0x0800)
4087 || (onenand_info.boot_buf_size != 0x0200)
4088 || (onenand_info.num_of_buffers != 0x0201)
4089 || (onenand_info.technology != 0)) {
4090
4091 pr_info("%s: Detected an unsupported device\n"
4092 , __func__);
4093 err = -EIO;
4094 }
4095 }
4096
4097 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
4098
4099 return err;
4100}
4101
4102int msm_onenand_read_oob(struct mtd_info *mtd,
4103 loff_t from, struct mtd_oob_ops *ops)
4104{
4105 struct msm_nand_chip *chip = mtd->priv;
4106
4107 struct {
4108 dmov_s cmd[53];
4109 unsigned cmdptr;
4110 struct {
4111 uint32_t sfbcfg;
4112 uint32_t sfcmd[9];
4113 uint32_t sfexec;
4114 uint32_t sfstat[9];
4115 uint32_t addr0;
4116 uint32_t addr1;
4117 uint32_t addr2;
4118 uint32_t addr3;
4119 uint32_t addr4;
4120 uint32_t addr5;
4121 uint32_t addr6;
4122 uint32_t data0;
4123 uint32_t data1;
4124 uint32_t data2;
4125 uint32_t data3;
4126 uint32_t data4;
4127 uint32_t data5;
4128 uint32_t data6;
4129 uint32_t macro[5];
4130 } data;
4131 } *dma_buffer;
4132 dmov_s *cmd;
4133
4134 int err = 0;
4135 int i;
4136 dma_addr_t data_dma_addr = 0;
4137 dma_addr_t oob_dma_addr = 0;
4138 dma_addr_t data_dma_addr_curr = 0;
4139 dma_addr_t oob_dma_addr_curr = 0;
4140
4141 loff_t from_curr = 0;
4142 unsigned page_count;
4143 unsigned pages_read = 0;
4144
4145 uint16_t onenand_startaddr1;
4146 uint16_t onenand_startaddr8;
4147 uint16_t onenand_startaddr2;
4148 uint16_t onenand_startbuffer;
4149 uint16_t onenand_sysconfig1;
4150 uint16_t controller_status;
4151 uint16_t interrupt_status;
4152 uint16_t ecc_status;
4153#if VERBOSE
4154 pr_info("================================================="
4155 "================\n");
4156 pr_info("%s: from 0x%llx mode %d \ndatbuf 0x%p datlen 0x%x"
4157 "\noobbuf 0x%p ooblen 0x%x\n",
4158 __func__, from, ops->mode, ops->datbuf, ops->len,
4159 ops->oobbuf, ops->ooblen);
4160#endif
4161 if (!mtd) {
4162 pr_err("%s: invalid mtd pointer, 0x%x\n", __func__,
4163 (uint32_t)mtd);
4164 return -EINVAL;
4165 }
4166 if (from & (mtd->writesize - 1)) {
4167 pr_err("%s: unsupported from, 0x%llx\n", __func__,
4168 from);
4169 return -EINVAL;
4170 }
4171
4172	if ((ops->mode != MTD_OPS_PLACE_OOB) && (ops->mode != MTD_OPS_AUTO_OOB) &&
4173			(ops->mode != MTD_OPS_RAW)) {
4174		pr_err("%s: unsupported ops->mode, %d\n", __func__,
4175 ops->mode);
4176 return -EINVAL;
4177 }
4178
4179 if (((ops->datbuf == NULL) || (ops->len == 0)) &&
4180 ((ops->oobbuf == NULL) || (ops->ooblen == 0))) {
4181 pr_err("%s: incorrect ops fields - nothing to do\n",
4182 __func__);
4183 return -EINVAL;
4184 }
4185
4186 if ((ops->datbuf != NULL) && (ops->len == 0)) {
4187 pr_err("%s: data buffer passed but length 0\n",
4188 __func__);
4189 return -EINVAL;
4190 }
4191
4192 if ((ops->oobbuf != NULL) && (ops->ooblen == 0)) {
4193 pr_err("%s: oob buffer passed but length 0\n",
4194 __func__);
4195 return -EINVAL;
4196 }
4197
4198	if (ops->mode != MTD_OPS_RAW) {
4199		if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
4200 /* when ops->datbuf is NULL, ops->len can be ooblen */
4201 pr_err("%s: unsupported ops->len, %d\n", __func__,
4202 ops->len);
4203 return -EINVAL;
4204 }
4205 } else {
4206 if (ops->datbuf != NULL &&
4207 (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
4208 pr_err("%s: unsupported ops->len,"
4209				" %d for MTD_OPS_RAW\n", __func__, ops->len);
4210			return -EINVAL;
4211 }
4212 }
4213
4214	if ((ops->mode == MTD_OPS_RAW) && (ops->oobbuf)) {
4215		pr_err("%s: unsupported operation, oobbuf pointer "
4216 "passed in for RAW mode, %x\n", __func__,
4217 (uint32_t)ops->oobbuf);
4218 return -EINVAL;
4219 }
4220
4221 if (ops->oobbuf && !ops->datbuf) {
4222		page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
4223			mtd->oobavail : mtd->oobsize);
4224 if ((page_count == 0) && (ops->ooblen))
4225 page_count = 1;
4226	} else if (ops->mode != MTD_OPS_RAW)
4227		page_count = ops->len / mtd->writesize;
4228 else
4229 page_count = ops->len / (mtd->writesize + mtd->oobsize);
4230
4231	if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->oobbuf != NULL)) {
4232		if (page_count * mtd->oobsize > ops->ooblen) {
4233 pr_err("%s: unsupported ops->ooblen for "
4234 "PLACE, %d\n", __func__, ops->ooblen);
4235 return -EINVAL;
4236 }
4237 }
4238
4239	if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->ooblen != 0) &&
4240			(ops->ooboffs != 0)) {
4241 pr_err("%s: unsupported ops->ooboffs, %d\n", __func__,
4242 ops->ooboffs);
4243 return -EINVAL;
4244 }
4245
4246 if (ops->datbuf) {
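		/* Poison the buffer so stale data is obvious if the read fails */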
4247 memset(ops->datbuf, 0x55, ops->len);
4248 data_dma_addr_curr = data_dma_addr = msm_nand_dma_map(chip->dev,
4249 ops->datbuf, ops->len, DMA_FROM_DEVICE);
4250 if (dma_mapping_error(chip->dev, data_dma_addr)) {
4251 pr_err("%s: failed to get dma addr for %p\n",
4252 __func__, ops->datbuf);
4253 return -EIO;
4254 }
4255 }
4256 if (ops->oobbuf) {
4257 memset(ops->oobbuf, 0x55, ops->ooblen);
4258 oob_dma_addr_curr = oob_dma_addr = msm_nand_dma_map(chip->dev,
4259 ops->oobbuf, ops->ooblen, DMA_FROM_DEVICE);
4260 if (dma_mapping_error(chip->dev, oob_dma_addr)) {
4261 pr_err("%s: failed to get dma addr for %p\n",
4262 __func__, ops->oobbuf);
4263 err = -EIO;
4264 goto err_dma_map_oobbuf_failed;
4265 }
4266 }
4267
4268 wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
4269 (chip, sizeof(*dma_buffer))));
4270
4271 from_curr = from;
4272
4273 while (page_count-- > 0) {
4274
4275 cmd = dma_buffer->cmd;
4276
4277 if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
4278 && (from_curr >= (mtd->size>>1))) { /* DDP Device */
4279 onenand_startaddr1 = DEVICE_FLASHCORE_1 |
4280 (((uint32_t)(from_curr-(mtd->size>>1))
4281 / mtd->erasesize));
4282 onenand_startaddr2 = DEVICE_BUFFERRAM_1;
4283 } else {
4284 onenand_startaddr1 = DEVICE_FLASHCORE_0 |
4285 ((uint32_t)from_curr / mtd->erasesize) ;
4286 onenand_startaddr2 = DEVICE_BUFFERRAM_0;
4287 }
4288
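		/*
		 * START_ADDRESS_8 selects the page within the erase block; the
		 * value is shifted left by two, leaving the sector bits at 0.
		 */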
4289 onenand_startaddr8 = (((uint32_t)from_curr &
4290 (mtd->erasesize - 1)) / mtd->writesize) << 2;
4291 onenand_startbuffer = DATARAM0_0 << 8;
4292		onenand_sysconfig1 = (ops->mode == MTD_OPS_RAW) ?
4293			ONENAND_SYSCFG1_ECCDIS(nand_sfcmd_mode) :
4294 ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode);
4295
4296 dma_buffer->data.sfbcfg = SFLASH_BCFG |
4297 (nand_sfcmd_mode ? 0 : (1 << 24));
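		/*
		 * Per-page SFLASHC sequence: program the OneNAND command and
		 * address registers, wait for the device interrupt, read back
		 * the status registers, pull the main data in four 256-word
		 * bursts and the spare in one 32-word burst, then restore the
		 * register defaults.
		 */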
4298 dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
4299 MSM_NAND_SFCMD_CMDXS,
4300 nand_sfcmd_mode,
4301 MSM_NAND_SFCMD_REGWR);
4302 dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
4303 MSM_NAND_SFCMD_CMDXS,
4304 nand_sfcmd_mode,
4305 MSM_NAND_SFCMD_INTHI);
4306 dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
4307 MSM_NAND_SFCMD_DATXS,
4308 nand_sfcmd_mode,
4309 MSM_NAND_SFCMD_REGRD);
4310 dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(256, 0, 0,
4311 MSM_NAND_SFCMD_DATXS,
4312 nand_sfcmd_mode,
4313 MSM_NAND_SFCMD_DATRD);
4314 dma_buffer->data.sfcmd[4] = SFLASH_PREPCMD(256, 0, 0,
4315 MSM_NAND_SFCMD_DATXS,
4316 nand_sfcmd_mode,
4317 MSM_NAND_SFCMD_DATRD);
4318 dma_buffer->data.sfcmd[5] = SFLASH_PREPCMD(256, 0, 0,
4319 MSM_NAND_SFCMD_DATXS,
4320 nand_sfcmd_mode,
4321 MSM_NAND_SFCMD_DATRD);
4322 dma_buffer->data.sfcmd[6] = SFLASH_PREPCMD(256, 0, 0,
4323 MSM_NAND_SFCMD_DATXS,
4324 nand_sfcmd_mode,
4325 MSM_NAND_SFCMD_DATRD);
4326 dma_buffer->data.sfcmd[7] = SFLASH_PREPCMD(32, 0, 0,
4327 MSM_NAND_SFCMD_DATXS,
4328 nand_sfcmd_mode,
4329 MSM_NAND_SFCMD_DATRD);
4330 dma_buffer->data.sfcmd[8] = SFLASH_PREPCMD(4, 10, 0,
4331 MSM_NAND_SFCMD_CMDXS,
4332 nand_sfcmd_mode,
4333 MSM_NAND_SFCMD_REGWR);
4334 dma_buffer->data.sfexec = 1;
4335 dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
4336 dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
4337 dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
4338 dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
4339 dma_buffer->data.sfstat[4] = CLEAN_DATA_32;
4340 dma_buffer->data.sfstat[5] = CLEAN_DATA_32;
4341 dma_buffer->data.sfstat[6] = CLEAN_DATA_32;
4342 dma_buffer->data.sfstat[7] = CLEAN_DATA_32;
4343 dma_buffer->data.sfstat[8] = CLEAN_DATA_32;
4344 dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
4345 (ONENAND_SYSTEM_CONFIG_1);
4346 dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
4347 (ONENAND_START_ADDRESS_1);
4348 dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
4349 (ONENAND_START_ADDRESS_2);
4350 dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
4351 (ONENAND_COMMAND);
4352 dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
4353 (ONENAND_INTERRUPT_STATUS);
4354 dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
4355 (ONENAND_SYSTEM_CONFIG_1);
4356 dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
4357 (ONENAND_START_ADDRESS_1);
4358 dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
4359 (onenand_sysconfig1);
4360 dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
4361 (onenand_startaddr1);
4362 dma_buffer->data.data2 = (onenand_startbuffer << 16) |
4363 (onenand_startaddr2);
4364 dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
4365 (ONENAND_CMDLOADSPARE);
4366 dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
4367 (CLEAN_DATA_16);
4368 dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
4369 (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
4370 dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
4371 (ONENAND_STARTADDR1_RES);
4372 dma_buffer->data.macro[0] = 0x0200;
4373 dma_buffer->data.macro[1] = 0x0300;
4374 dma_buffer->data.macro[2] = 0x0400;
4375 dma_buffer->data.macro[3] = 0x0500;
4376 dma_buffer->data.macro[4] = 0x8010;
4377
4378 /*************************************************************/
4379 /* Write necessary address registers in the onenand device */
4380 /*************************************************************/
4381
4382 /* Enable and configure the SFlash controller */
4383 cmd->cmd = 0;
4384 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
4385 cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
4386 cmd->len = 4;
4387 cmd++;
4388
4389 /* Block on cmd ready and write CMD register */
4390 cmd->cmd = DST_CRCI_NAND_CMD;
4391 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
4392 cmd->dst = MSM_NAND_SFLASHC_CMD;
4393 cmd->len = 4;
4394 cmd++;
4395
4396 /* Write the ADDR0 and ADDR1 registers */
4397 cmd->cmd = 0;
4398 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
4399 cmd->dst = MSM_NAND_ADDR0;
4400 cmd->len = 8;
4401 cmd++;
4402
4403 /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
4404 cmd->cmd = 0;
4405 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
4406 cmd->dst = MSM_NAND_ADDR2;
4407 cmd->len = 16;
4408 cmd++;
4409
4410 /* Write the ADDR6 registers */
4411 cmd->cmd = 0;
4412 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
4413 cmd->dst = MSM_NAND_ADDR6;
4414 cmd->len = 4;
4415 cmd++;
4416
4417 /* Write the GENP0, GENP1, GENP2, GENP3 registers */
4418 cmd->cmd = 0;
4419 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
4420 cmd->dst = MSM_NAND_GENP_REG0;
4421 cmd->len = 16;
4422 cmd++;
4423
4424 /* Write the FLASH_DEV_CMD4,5,6 registers */
4425 cmd->cmd = 0;
4426 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
4427 cmd->dst = MSM_NAND_DEV_CMD4;
4428 cmd->len = 12;
4429 cmd++;
4430
4431 /* Kick the execute command */
4432 cmd->cmd = 0;
4433 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
4434 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
4435 cmd->len = 4;
4436 cmd++;
4437
4438 /* Block on data ready, and read the status register */
4439 cmd->cmd = SRC_CRCI_NAND_DATA;
4440 cmd->src = MSM_NAND_SFLASHC_STATUS;
4441 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
4442 cmd->len = 4;
4443 cmd++;
4444
4445 /*************************************************************/
4446 /* Wait for the interrupt from the Onenand device controller */
4447 /*************************************************************/
4448
4449 /* Block on cmd ready and write CMD register */
4450 cmd->cmd = DST_CRCI_NAND_CMD;
4451 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
4452 cmd->dst = MSM_NAND_SFLASHC_CMD;
4453 cmd->len = 4;
4454 cmd++;
4455
4456 /* Kick the execute command */
4457 cmd->cmd = 0;
4458 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
4459 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
4460 cmd->len = 4;
4461 cmd++;
4462
4463 /* Block on data ready, and read the status register */
4464 cmd->cmd = SRC_CRCI_NAND_DATA;
4465 cmd->src = MSM_NAND_SFLASHC_STATUS;
4466 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
4467 cmd->len = 4;
4468 cmd++;
4469
4470 /*************************************************************/
4471 /* Read necessary status registers from the onenand device */
4472 /*************************************************************/
4473
4474 /* Block on cmd ready and write CMD register */
4475 cmd->cmd = DST_CRCI_NAND_CMD;
4476 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
4477 cmd->dst = MSM_NAND_SFLASHC_CMD;
4478 cmd->len = 4;
4479 cmd++;
4480
4481 /* Kick the execute command */
4482 cmd->cmd = 0;
4483 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
4484 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
4485 cmd->len = 4;
4486 cmd++;
4487
4488 /* Block on data ready, and read the status register */
4489 cmd->cmd = SRC_CRCI_NAND_DATA;
4490 cmd->src = MSM_NAND_SFLASHC_STATUS;
4491 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
4492 cmd->len = 4;
4493 cmd++;
4494
4495 /* Read the GENP3 register */
4496 cmd->cmd = 0;
4497 cmd->src = MSM_NAND_GENP_REG3;
4498 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
4499 cmd->len = 4;
4500 cmd++;
4501
4502 /* Read the DEVCMD4 register */
4503 cmd->cmd = 0;
4504 cmd->src = MSM_NAND_DEV_CMD4;
4505 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
4506 cmd->len = 4;
4507 cmd++;
4508
4509 /*************************************************************/
4510 /* Read the data ram area from the onenand buffer ram */
4511 /*************************************************************/
4512
4513 if (ops->datbuf) {
4514
4515 dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
4516 (ONENAND_CMDLOAD);
4517
4518 for (i = 0; i < 4; i++) {
4519
4520 /* Block on cmd ready and write CMD register */
4521 cmd->cmd = DST_CRCI_NAND_CMD;
4522 cmd->src = msm_virt_to_dma(chip,
4523 &dma_buffer->data.sfcmd[3+i]);
4524 cmd->dst = MSM_NAND_SFLASHC_CMD;
4525 cmd->len = 4;
4526 cmd++;
4527
4528 /* Write the MACRO1 register */
4529 cmd->cmd = 0;
4530 cmd->src = msm_virt_to_dma(chip,
4531 &dma_buffer->data.macro[i]);
4532 cmd->dst = MSM_NAND_MACRO1_REG;
4533 cmd->len = 4;
4534 cmd++;
4535
4536 /* Kick the execute command */
4537 cmd->cmd = 0;
4538 cmd->src = msm_virt_to_dma(chip,
4539 &dma_buffer->data.sfexec);
4540 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
4541 cmd->len = 4;
4542 cmd++;
4543
4544 /* Block on data rdy, & read status register */
4545 cmd->cmd = SRC_CRCI_NAND_DATA;
4546 cmd->src = MSM_NAND_SFLASHC_STATUS;
4547 cmd->dst = msm_virt_to_dma(chip,
4548 &dma_buffer->data.sfstat[3+i]);
4549 cmd->len = 4;
4550 cmd++;
4551
4552 /* Transfer nand ctlr buf contents to usr buf */
4553 cmd->cmd = 0;
4554 cmd->src = MSM_NAND_FLASH_BUFFER;
4555 cmd->dst = data_dma_addr_curr;
4556 cmd->len = 512;
4557 data_dma_addr_curr += 512;
4558 cmd++;
4559 }
4560 }
4561
4562		if ((ops->oobbuf) || (ops->mode == MTD_OPS_RAW)) {
4563
4564 /* Block on cmd ready and write CMD register */
4565 cmd->cmd = DST_CRCI_NAND_CMD;
4566 cmd->src = msm_virt_to_dma(chip,
4567 &dma_buffer->data.sfcmd[7]);
4568 cmd->dst = MSM_NAND_SFLASHC_CMD;
4569 cmd->len = 4;
4570 cmd++;
4571
4572 /* Write the MACRO1 register */
4573 cmd->cmd = 0;
4574 cmd->src = msm_virt_to_dma(chip,
4575 &dma_buffer->data.macro[4]);
4576 cmd->dst = MSM_NAND_MACRO1_REG;
4577 cmd->len = 4;
4578 cmd++;
4579
4580 /* Kick the execute command */
4581 cmd->cmd = 0;
4582 cmd->src = msm_virt_to_dma(chip,
4583 &dma_buffer->data.sfexec);
4584 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
4585 cmd->len = 4;
4586 cmd++;
4587
4588 /* Block on data ready, and read status register */
4589 cmd->cmd = SRC_CRCI_NAND_DATA;
4590 cmd->src = MSM_NAND_SFLASHC_STATUS;
4591 cmd->dst = msm_virt_to_dma(chip,
4592 &dma_buffer->data.sfstat[7]);
4593 cmd->len = 4;
4594 cmd++;
4595
4596 /* Transfer nand ctlr buffer contents into usr buf */
4597			if (ops->mode == MTD_OPS_AUTO_OOB) {
4598				for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
4599 cmd->cmd = 0;
4600 cmd->src = MSM_NAND_FLASH_BUFFER +
4601 mtd->ecclayout->oobfree[i].offset;
4602 cmd->dst = oob_dma_addr_curr;
4603 cmd->len =
4604 mtd->ecclayout->oobfree[i].length;
4605 oob_dma_addr_curr +=
4606 mtd->ecclayout->oobfree[i].length;
4607 cmd++;
4608 }
4609 }
4610			if (ops->mode == MTD_OPS_PLACE_OOB) {
4611				cmd->cmd = 0;
4612 cmd->src = MSM_NAND_FLASH_BUFFER;
4613 cmd->dst = oob_dma_addr_curr;
4614 cmd->len = mtd->oobsize;
4615 oob_dma_addr_curr += mtd->oobsize;
4616 cmd++;
4617 }
4618			if (ops->mode == MTD_OPS_RAW) {
4619				cmd->cmd = 0;
4620 cmd->src = MSM_NAND_FLASH_BUFFER;
4621 cmd->dst = data_dma_addr_curr;
4622 cmd->len = mtd->oobsize;
4623 data_dma_addr_curr += mtd->oobsize;
4624 cmd++;
4625 }
4626 }
4627
4628 /*************************************************************/
4629 /* Restore the necessary registers to proper values */
4630 /*************************************************************/
4631
4632 /* Block on cmd ready and write CMD register */
4633 cmd->cmd = DST_CRCI_NAND_CMD;
4634 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[8]);
4635 cmd->dst = MSM_NAND_SFLASHC_CMD;
4636 cmd->len = 4;
4637 cmd++;
4638
4639 /* Kick the execute command */
4640 cmd->cmd = 0;
4641 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
4642 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
4643 cmd->len = 4;
4644 cmd++;
4645
4646 /* Block on data ready, and read the status register */
4647 cmd->cmd = SRC_CRCI_NAND_DATA;
4648 cmd->src = MSM_NAND_SFLASHC_STATUS;
4649 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[8]);
4650 cmd->len = 4;
4651 cmd++;
4652
4653
4654 BUILD_BUG_ON(53 != ARRAY_SIZE(dma_buffer->cmd));
4655 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
4656 dma_buffer->cmd[0].cmd |= CMD_OCB;
4657 cmd[-1].cmd |= CMD_OCU | CMD_LC;
4658
4659 dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
4660 >> 3) | CMD_PTR_LP;
4661
4662 mb();
4663		msm_dmov_exec_cmd(chip->dma_channel,
4664			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
4665 &dma_buffer->cmdptr)));
4666 mb();
4667
4668 ecc_status = (dma_buffer->data.data3 >> 16) &
4669 0x0000FFFF;
4670 interrupt_status = (dma_buffer->data.data4 >> 0) &
4671 0x0000FFFF;
4672 controller_status = (dma_buffer->data.data4 >> 16) &
4673 0x0000FFFF;
4674
4675#if VERBOSE
4676 pr_info("\n%s: sflash status %x %x %x %x %x %x %x"
4677			" %x %x\n", __func__,
4678 dma_buffer->data.sfstat[0],
4679 dma_buffer->data.sfstat[1],
4680 dma_buffer->data.sfstat[2],
4681 dma_buffer->data.sfstat[3],
4682 dma_buffer->data.sfstat[4],
4683 dma_buffer->data.sfstat[5],
4684 dma_buffer->data.sfstat[6],
4685 dma_buffer->data.sfstat[7],
4686 dma_buffer->data.sfstat[8]);
4687
4688 pr_info("%s: controller_status = %x\n", __func__,
4689 controller_status);
4690 pr_info("%s: interrupt_status = %x\n", __func__,
4691 interrupt_status);
4692 pr_info("%s: ecc_status = %x\n", __func__,
4693 ecc_status);
4694#endif
4695 /* Check for errors, protection violations etc */
4696 if ((controller_status != 0)
4697 || (dma_buffer->data.sfstat[0] & 0x110)
4698 || (dma_buffer->data.sfstat[1] & 0x110)
4699 || (dma_buffer->data.sfstat[2] & 0x110)
4700 || (dma_buffer->data.sfstat[8] & 0x110)
4701 || ((dma_buffer->data.sfstat[3] & 0x110) &&
4702 (ops->datbuf))
4703 || ((dma_buffer->data.sfstat[4] & 0x110) &&
4704 (ops->datbuf))
4705 || ((dma_buffer->data.sfstat[5] & 0x110) &&
4706 (ops->datbuf))
4707 || ((dma_buffer->data.sfstat[6] & 0x110) &&
4708 (ops->datbuf))
4709 || ((dma_buffer->data.sfstat[7] & 0x110) &&
4710 ((ops->oobbuf)
4711				|| (ops->mode == MTD_OPS_RAW)))) {
4712			pr_info("%s: ECC/MPU/OP error\n", __func__);
4713 err = -EIO;
4714 }
4715
4716 if (err)
4717 break;
4718 pages_read++;
4719 from_curr += mtd->writesize;
4720 }
4721
4722 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
4723
4724 if (ops->oobbuf) {
4725 dma_unmap_page(chip->dev, oob_dma_addr, ops->ooblen,
4726 DMA_FROM_DEVICE);
4727 }
4728err_dma_map_oobbuf_failed:
4729 if (ops->datbuf) {
4730 dma_unmap_page(chip->dev, data_dma_addr, ops->len,
4731 DMA_FROM_DEVICE);
4732 }
4733
4734 if (err) {
4735 pr_err("%s: %llx %x %x failed\n", __func__, from_curr,
4736 ops->datbuf ? ops->len : 0, ops->ooblen);
4737 } else {
4738 ops->retlen = ops->oobretlen = 0;
4739 if (ops->datbuf != NULL) {
4740			if (ops->mode != MTD_OPS_RAW)
4741				ops->retlen = mtd->writesize * pages_read;
4742 else
4743 ops->retlen = (mtd->writesize + mtd->oobsize)
4744 * pages_read;
4745 }
4746 if (ops->oobbuf != NULL) {
4747			if (ops->mode == MTD_OPS_AUTO_OOB)
4748				ops->oobretlen = mtd->oobavail * pages_read;
4749 else
4750 ops->oobretlen = mtd->oobsize * pages_read;
4751 }
4752 }
4753
4754#if VERBOSE
4755 pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
4756 __func__, err, ops->retlen, ops->oobretlen);
4757
4758 pr_info("==================================================="
4759 "==============\n");
4760#endif
4761 return err;
4762}
4763
4764int msm_onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
4765 size_t *retlen, u_char *buf)
4766{
4767 int ret;
4768 struct mtd_oob_ops ops;
4769
4770	ops.mode = MTD_OPS_PLACE_OOB;
4771	ops.datbuf = buf;
4772 ops.len = len;
4773 ops.retlen = 0;
4774 ops.oobbuf = NULL;
4775 ops.ooblen = 0;
4776 ops.oobretlen = 0;
4777 ret = msm_onenand_read_oob(mtd, from, &ops);
4778 *retlen = ops.retlen;
4779
4780 return ret;
4781}
4782
4783static int msm_onenand_write_oob(struct mtd_info *mtd, loff_t to,
4784 struct mtd_oob_ops *ops)
4785{
4786 struct msm_nand_chip *chip = mtd->priv;
4787
4788 struct {
4789 dmov_s cmd[53];
4790 unsigned cmdptr;
4791 struct {
4792 uint32_t sfbcfg;
4793 uint32_t sfcmd[10];
4794 uint32_t sfexec;
4795 uint32_t sfstat[10];
4796 uint32_t addr0;
4797 uint32_t addr1;
4798 uint32_t addr2;
4799 uint32_t addr3;
4800 uint32_t addr4;
4801 uint32_t addr5;
4802 uint32_t addr6;
4803 uint32_t data0;
4804 uint32_t data1;
4805 uint32_t data2;
4806 uint32_t data3;
4807 uint32_t data4;
4808 uint32_t data5;
4809 uint32_t data6;
4810 uint32_t macro[5];
4811 } data;
4812 } *dma_buffer;
4813 dmov_s *cmd;
4814
4815 int err = 0;
4816 int i, j, k;
4817 dma_addr_t data_dma_addr = 0;
4818 dma_addr_t oob_dma_addr = 0;
4819 dma_addr_t init_dma_addr = 0;
4820 dma_addr_t data_dma_addr_curr = 0;
4821 dma_addr_t oob_dma_addr_curr = 0;
4822 uint8_t *init_spare_bytes;
4823
4824 loff_t to_curr = 0;
4825 unsigned page_count;
4826 unsigned pages_written = 0;
4827
4828 uint16_t onenand_startaddr1;
4829 uint16_t onenand_startaddr8;
4830 uint16_t onenand_startaddr2;
4831 uint16_t onenand_startbuffer;
4832 uint16_t onenand_sysconfig1;
4833
4834 uint16_t controller_status;
4835 uint16_t interrupt_status;
4836 uint16_t ecc_status;
4837
4838#if VERBOSE
4839 pr_info("================================================="
4840 "================\n");
4841 pr_info("%s: to 0x%llx mode %d \ndatbuf 0x%p datlen 0x%x"
4842 "\noobbuf 0x%p ooblen 0x%x\n",
4843 __func__, to, ops->mode, ops->datbuf, ops->len,
4844 ops->oobbuf, ops->ooblen);
4845#endif
4846 if (!mtd) {
4847 pr_err("%s: invalid mtd pointer, 0x%x\n", __func__,
4848 (uint32_t)mtd);
4849 return -EINVAL;
4850 }
4851 if (to & (mtd->writesize - 1)) {
4852 pr_err("%s: unsupported to, 0x%llx\n", __func__, to);
4853 return -EINVAL;
4854 }
4855
4856	if ((ops->mode != MTD_OPS_PLACE_OOB) && (ops->mode != MTD_OPS_AUTO_OOB) &&
4857			(ops->mode != MTD_OPS_RAW)) {
4858		pr_err("%s: unsupported ops->mode, %d\n", __func__,
4859 ops->mode);
4860 return -EINVAL;
4861 }
4862
4863 if (((ops->datbuf == NULL) || (ops->len == 0)) &&
4864 ((ops->oobbuf == NULL) || (ops->ooblen == 0))) {
4865 pr_err("%s: incorrect ops fields - nothing to do\n",
4866 __func__);
4867 return -EINVAL;
4868 }
4869
4870 if ((ops->datbuf != NULL) && (ops->len == 0)) {
4871 pr_err("%s: data buffer passed but length 0\n",
4872 __func__);
4873 return -EINVAL;
4874 }
4875
4876 if ((ops->oobbuf != NULL) && (ops->ooblen == 0)) {
4877 pr_err("%s: oob buffer passed but length 0\n",
4878 __func__);
4879 return -EINVAL;
4880 }
4881
4882	if (ops->mode != MTD_OPS_RAW) {
4883		if (ops->datbuf != NULL && (ops->len % mtd->writesize) != 0) {
4884 /* when ops->datbuf is NULL, ops->len can be ooblen */
4885 pr_err("%s: unsupported ops->len, %d\n", __func__,
4886 ops->len);
4887 return -EINVAL;
4888 }
4889 } else {
4890 if (ops->datbuf != NULL &&
4891 (ops->len % (mtd->writesize + mtd->oobsize)) != 0) {
4892 pr_err("%s: unsupported ops->len,"
4893				" %d for MTD_OPS_RAW\n", __func__, ops->len);
4894			return -EINVAL;
4895 }
4896 }
4897
4898	if ((ops->mode == MTD_OPS_RAW) && (ops->oobbuf)) {
4899		pr_err("%s: unsupported operation, oobbuf pointer "
4900 "passed in for RAW mode, %x\n", __func__,
4901 (uint32_t)ops->oobbuf);
4902 return -EINVAL;
4903 }
4904
4905 if (ops->oobbuf && !ops->datbuf) {
4906		page_count = ops->ooblen / ((ops->mode == MTD_OPS_AUTO_OOB) ?
4907			mtd->oobavail : mtd->oobsize);
4908 if ((page_count == 0) && (ops->ooblen))
4909 page_count = 1;
4910	} else if (ops->mode != MTD_OPS_RAW)
4911		page_count = ops->len / mtd->writesize;
4912 else
4913 page_count = ops->len / (mtd->writesize + mtd->oobsize);
4914
4915	if ((ops->mode == MTD_OPS_AUTO_OOB) && (ops->oobbuf != NULL)) {
4916		if (page_count > 1) {
4917			pr_err("%s: unsupported ops->ooblen for "
4918				"AUTO, %d\n", __func__, ops->ooblen);
4919 return -EINVAL;
4920 }
4921 }
4922
4923	if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->oobbuf != NULL)) {
4924		if (page_count * mtd->oobsize > ops->ooblen) {
4925			pr_err("%s: unsupported ops->ooblen for "
4926				"PLACE, %d\n", __func__, ops->ooblen);
4927 return -EINVAL;
4928 }
4929 }
4930
4931	if ((ops->mode == MTD_OPS_PLACE_OOB) && (ops->ooblen != 0) &&
4932			(ops->ooboffs != 0)) {
4933 pr_err("%s: unsupported ops->ooboffs, %d\n",
4934 __func__, ops->ooboffs);
4935 return -EINVAL;
4936 }
4937
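	/*
	 * Scratch spare buffer: initialised to all 0xFF (erased state) and, in
	 * AUTO mode, overlaid with the caller's OOB bytes at the free
	 * positions before the page is programmed.
	 */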
4938 init_spare_bytes = kmalloc(64, GFP_KERNEL);
4939 if (!init_spare_bytes) {
4940 pr_err("%s: failed to alloc init_spare_bytes buffer\n",
4941 __func__);
4942 return -ENOMEM;
4943 }
4944 for (i = 0; i < 64; i++)
4945 init_spare_bytes[i] = 0xFF;
4946
4947	if ((ops->oobbuf) && (ops->mode == MTD_OPS_AUTO_OOB)) {
4948		for (i = 0, k = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++)
4949 for (j = 0; j < mtd->ecclayout->oobfree[i].length;
4950 j++) {
4951 init_spare_bytes[j +
4952 mtd->ecclayout->oobfree[i].offset]
4953 = (ops->oobbuf)[k];
4954 k++;
4955 }
4956 }
4957
4958 if (ops->datbuf) {
4959 data_dma_addr_curr = data_dma_addr = msm_nand_dma_map(chip->dev,
4960 ops->datbuf, ops->len, DMA_TO_DEVICE);
4961 if (dma_mapping_error(chip->dev, data_dma_addr)) {
4962 pr_err("%s: failed to get dma addr for %p\n",
4963 __func__, ops->datbuf);
			kfree(init_spare_bytes);
4964			return -EIO;
4965 }
4966 }
4967 if (ops->oobbuf) {
4968 oob_dma_addr_curr = oob_dma_addr = msm_nand_dma_map(chip->dev,
4969 ops->oobbuf, ops->ooblen, DMA_TO_DEVICE);
4970 if (dma_mapping_error(chip->dev, oob_dma_addr)) {
4971 pr_err("%s: failed to get dma addr for %p\n",
4972 __func__, ops->oobbuf);
4973 err = -EIO;
4974 goto err_dma_map_oobbuf_failed;
4975 }
4976 }
4977
4978 init_dma_addr = msm_nand_dma_map(chip->dev, init_spare_bytes, 64,
4979 DMA_TO_DEVICE);
4980 if (dma_mapping_error(chip->dev, init_dma_addr)) {
4981 pr_err("%s: failed to get dma addr for %p\n",
4982 __func__, init_spare_bytes);
4983 err = -EIO;
4984 goto err_dma_map_initbuf_failed;
4985 }
4986
4987
4988 wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
4989 (chip, sizeof(*dma_buffer))));
4990
4991 to_curr = to;
4992
4993 while (page_count-- > 0) {
4994 cmd = dma_buffer->cmd;
4995
4996 if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
4997 && (to_curr >= (mtd->size>>1))) { /* DDP Device */
4998 onenand_startaddr1 = DEVICE_FLASHCORE_1 |
4999 (((uint32_t)(to_curr-(mtd->size>>1))
5000 / mtd->erasesize));
5001 onenand_startaddr2 = DEVICE_BUFFERRAM_1;
5002 } else {
5003 onenand_startaddr1 = DEVICE_FLASHCORE_0 |
5004 ((uint32_t)to_curr / mtd->erasesize) ;
5005 onenand_startaddr2 = DEVICE_BUFFERRAM_0;
5006 }
5007
5008 onenand_startaddr8 = (((uint32_t)to_curr &
5009 (mtd->erasesize - 1)) / mtd->writesize) << 2;
5010 onenand_startbuffer = DATARAM0_0 << 8;
5011		onenand_sysconfig1 = (ops->mode == MTD_OPS_RAW) ?
5012			ONENAND_SYSCFG1_ECCDIS(nand_sfcmd_mode) :
5013 ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode);
5014
5015 dma_buffer->data.sfbcfg = SFLASH_BCFG |
5016 (nand_sfcmd_mode ? 0 : (1 << 24));
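		/*
		 * Per-page SFLASHC sequence: program the OneNAND command and
		 * address registers, push the main data in four 256-word
		 * bursts and the spare in one 32-word burst, issue the program
		 * command, wait for the device interrupt, read back the status
		 * registers, then restore the register defaults.
		 */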
5017 dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(6, 0, 0,
5018 MSM_NAND_SFCMD_CMDXS,
5019 nand_sfcmd_mode,
5020 MSM_NAND_SFCMD_REGWR);
5021 dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(256, 0, 0,
5022 MSM_NAND_SFCMD_CMDXS,
5023 nand_sfcmd_mode,
5024 MSM_NAND_SFCMD_DATWR);
5025 dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(256, 0, 0,
5026 MSM_NAND_SFCMD_CMDXS,
5027 nand_sfcmd_mode,
5028 MSM_NAND_SFCMD_DATWR);
5029 dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(256, 0, 0,
5030 MSM_NAND_SFCMD_CMDXS,
5031 nand_sfcmd_mode,
5032 MSM_NAND_SFCMD_DATWR);
5033 dma_buffer->data.sfcmd[4] = SFLASH_PREPCMD(256, 0, 0,
5034 MSM_NAND_SFCMD_CMDXS,
5035 nand_sfcmd_mode,
5036 MSM_NAND_SFCMD_DATWR);
5037 dma_buffer->data.sfcmd[5] = SFLASH_PREPCMD(32, 0, 0,
5038 MSM_NAND_SFCMD_CMDXS,
5039 nand_sfcmd_mode,
5040 MSM_NAND_SFCMD_DATWR);
5041 dma_buffer->data.sfcmd[6] = SFLASH_PREPCMD(1, 6, 0,
5042 MSM_NAND_SFCMD_CMDXS,
5043 nand_sfcmd_mode,
5044 MSM_NAND_SFCMD_REGWR);
5045 dma_buffer->data.sfcmd[7] = SFLASH_PREPCMD(0, 0, 32,
5046 MSM_NAND_SFCMD_CMDXS,
5047 nand_sfcmd_mode,
5048 MSM_NAND_SFCMD_INTHI);
5049 dma_buffer->data.sfcmd[8] = SFLASH_PREPCMD(3, 7, 0,
5050 MSM_NAND_SFCMD_DATXS,
5051 nand_sfcmd_mode,
5052 MSM_NAND_SFCMD_REGRD);
5053 dma_buffer->data.sfcmd[9] = SFLASH_PREPCMD(4, 10, 0,
5054 MSM_NAND_SFCMD_CMDXS,
5055 nand_sfcmd_mode,
5056 MSM_NAND_SFCMD_REGWR);
5057 dma_buffer->data.sfexec = 1;
5058 dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
5059 dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
5060 dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
5061 dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
5062 dma_buffer->data.sfstat[4] = CLEAN_DATA_32;
5063 dma_buffer->data.sfstat[5] = CLEAN_DATA_32;
5064 dma_buffer->data.sfstat[6] = CLEAN_DATA_32;
5065 dma_buffer->data.sfstat[7] = CLEAN_DATA_32;
5066 dma_buffer->data.sfstat[8] = CLEAN_DATA_32;
5067 dma_buffer->data.sfstat[9] = CLEAN_DATA_32;
5068 dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
5069 (ONENAND_SYSTEM_CONFIG_1);
5070 dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
5071 (ONENAND_START_ADDRESS_1);
5072 dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
5073 (ONENAND_START_ADDRESS_2);
5074 dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
5075 (ONENAND_COMMAND);
5076 dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
5077 (ONENAND_INTERRUPT_STATUS);
5078 dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
5079 (ONENAND_SYSTEM_CONFIG_1);
5080 dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
5081 (ONENAND_START_ADDRESS_1);
5082 dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
5083 (onenand_sysconfig1);
5084 dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
5085 (onenand_startaddr1);
5086 dma_buffer->data.data2 = (onenand_startbuffer << 16) |
5087 (onenand_startaddr2);
5088 dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
5089 (ONENAND_CMDPROGSPARE);
5090 dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
5091 (CLEAN_DATA_16);
5092 dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
5093 (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
5094 dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
5095 (ONENAND_STARTADDR1_RES);
5096 dma_buffer->data.macro[0] = 0x0200;
5097 dma_buffer->data.macro[1] = 0x0300;
5098 dma_buffer->data.macro[2] = 0x0400;
5099 dma_buffer->data.macro[3] = 0x0500;
5100 dma_buffer->data.macro[4] = 0x8010;
5101
5102
5103 /*************************************************************/
5104 /* Write necessary address registers in the onenand device */
5105 /*************************************************************/
5106
5107 /* Enable and configure the SFlash controller */
5108 cmd->cmd = 0;
5109 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
5110 cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
5111 cmd->len = 4;
5112 cmd++;
5113
5114 /* Block on cmd ready and write CMD register */
5115 cmd->cmd = DST_CRCI_NAND_CMD;
5116 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
5117 cmd->dst = MSM_NAND_SFLASHC_CMD;
5118 cmd->len = 4;
5119 cmd++;
5120
5121 /* Write the ADDR0 and ADDR1 registers */
5122 cmd->cmd = 0;
5123 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
5124 cmd->dst = MSM_NAND_ADDR0;
5125 cmd->len = 8;
5126 cmd++;
5127
5128 /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
5129 cmd->cmd = 0;
5130 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
5131 cmd->dst = MSM_NAND_ADDR2;
5132 cmd->len = 16;
5133 cmd++;
5134
5135 /* Write the ADDR6 registers */
5136 cmd->cmd = 0;
5137 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
5138 cmd->dst = MSM_NAND_ADDR6;
5139 cmd->len = 4;
5140 cmd++;
5141
5142 /* Write the GENP0, GENP1, GENP2, GENP3 registers */
5143 cmd->cmd = 0;
5144 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
5145 cmd->dst = MSM_NAND_GENP_REG0;
5146 cmd->len = 16;
5147 cmd++;
5148
5149 /* Write the FLASH_DEV_CMD4,5,6 registers */
5150 cmd->cmd = 0;
5151 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
5152 cmd->dst = MSM_NAND_DEV_CMD4;
5153 cmd->len = 12;
5154 cmd++;
5155
5156 /* Kick the execute command */
5157 cmd->cmd = 0;
5158 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5159 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5160 cmd->len = 4;
5161 cmd++;
5162
5163 /* Block on data ready, and read the status register */
5164 cmd->cmd = SRC_CRCI_NAND_DATA;
5165 cmd->src = MSM_NAND_SFLASHC_STATUS;
5166 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
5167 cmd->len = 4;
5168 cmd++;
5169
5170 /*************************************************************/
5171 /* Write the data ram area in the onenand buffer ram */
5172 /*************************************************************/
5173
5174 if (ops->datbuf) {
5175 dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
5176 (ONENAND_CMDPROG);
5177
5178 for (i = 0; i < 4; i++) {
5179
5180 /* Block on cmd ready and write CMD register */
5181 cmd->cmd = DST_CRCI_NAND_CMD;
5182 cmd->src = msm_virt_to_dma(chip,
5183 &dma_buffer->data.sfcmd[1+i]);
5184 cmd->dst = MSM_NAND_SFLASHC_CMD;
5185 cmd->len = 4;
5186 cmd++;
5187
5188 /* Trnsfr usr buf contents to nand ctlr buf */
5189 cmd->cmd = 0;
5190 cmd->src = data_dma_addr_curr;
5191 cmd->dst = MSM_NAND_FLASH_BUFFER;
5192 cmd->len = 512;
5193 data_dma_addr_curr += 512;
5194 cmd++;
5195
5196 /* Write the MACRO1 register */
5197 cmd->cmd = 0;
5198 cmd->src = msm_virt_to_dma(chip,
5199 &dma_buffer->data.macro[i]);
5200 cmd->dst = MSM_NAND_MACRO1_REG;
5201 cmd->len = 4;
5202 cmd++;
5203
5204 /* Kick the execute command */
5205 cmd->cmd = 0;
5206 cmd->src = msm_virt_to_dma(chip,
5207 &dma_buffer->data.sfexec);
5208 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5209 cmd->len = 4;
5210 cmd++;
5211
5212 /* Block on data rdy, & read status register */
5213 cmd->cmd = SRC_CRCI_NAND_DATA;
5214 cmd->src = MSM_NAND_SFLASHC_STATUS;
5215 cmd->dst = msm_virt_to_dma(chip,
5216 &dma_buffer->data.sfstat[1+i]);
5217 cmd->len = 4;
5218 cmd++;
5219
5220 }
5221 }
5222
5223 /* Block on cmd ready and write CMD register */
5224 cmd->cmd = DST_CRCI_NAND_CMD;
5225 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[5]);
5226 cmd->dst = MSM_NAND_SFLASHC_CMD;
5227 cmd->len = 4;
5228 cmd++;
5229
5230		if ((ops->oobbuf) || (ops->mode == MTD_OPS_RAW)) {
5231
5232 /* Transfer user buf contents into nand ctlr buffer */
5233			if (ops->mode == MTD_OPS_AUTO_OOB) {
5234				cmd->cmd = 0;
5235 cmd->src = init_dma_addr;
5236 cmd->dst = MSM_NAND_FLASH_BUFFER;
5237 cmd->len = mtd->oobsize;
5238 cmd++;
5239 }
5240			if (ops->mode == MTD_OPS_PLACE_OOB) {
5241				cmd->cmd = 0;
5242 cmd->src = oob_dma_addr_curr;
5243 cmd->dst = MSM_NAND_FLASH_BUFFER;
5244 cmd->len = mtd->oobsize;
5245 oob_dma_addr_curr += mtd->oobsize;
5246 cmd++;
5247 }
5248			if (ops->mode == MTD_OPS_RAW) {
5249				cmd->cmd = 0;
5250 cmd->src = data_dma_addr_curr;
5251 cmd->dst = MSM_NAND_FLASH_BUFFER;
5252 cmd->len = mtd->oobsize;
5253 data_dma_addr_curr += mtd->oobsize;
5254 cmd++;
5255 }
5256 } else {
5257 cmd->cmd = 0;
5258 cmd->src = init_dma_addr;
5259 cmd->dst = MSM_NAND_FLASH_BUFFER;
5260 cmd->len = mtd->oobsize;
5261 cmd++;
5262 }
5263
5264 /* Write the MACRO1 register */
5265 cmd->cmd = 0;
5266 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.macro[4]);
5267 cmd->dst = MSM_NAND_MACRO1_REG;
5268 cmd->len = 4;
5269 cmd++;
5270
5271 /* Kick the execute command */
5272 cmd->cmd = 0;
5273 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5274 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5275 cmd->len = 4;
5276 cmd++;
5277
5278 /* Block on data ready, and read the status register */
5279 cmd->cmd = SRC_CRCI_NAND_DATA;
5280 cmd->src = MSM_NAND_SFLASHC_STATUS;
5281 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[5]);
5282 cmd->len = 4;
5283 cmd++;
5284
5285 /*********************************************************/
5286 /* Issuing write command */
5287 /*********************************************************/
5288
5289 /* Block on cmd ready and write CMD register */
5290 cmd->cmd = DST_CRCI_NAND_CMD;
5291 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[6]);
5292 cmd->dst = MSM_NAND_SFLASHC_CMD;
5293 cmd->len = 4;
5294 cmd++;
5295
5296 /* Kick the execute command */
5297 cmd->cmd = 0;
5298 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5299 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5300 cmd->len = 4;
5301 cmd++;
5302
5303 /* Block on data ready, and read the status register */
5304 cmd->cmd = SRC_CRCI_NAND_DATA;
5305 cmd->src = MSM_NAND_SFLASHC_STATUS;
5306 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[6]);
5307 cmd->len = 4;
5308 cmd++;
5309
5310 /*************************************************************/
5311 /* Wait for the interrupt from the Onenand device controller */
5312 /*************************************************************/
5313
5314 /* Block on cmd ready and write CMD register */
5315 cmd->cmd = DST_CRCI_NAND_CMD;
5316 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[7]);
5317 cmd->dst = MSM_NAND_SFLASHC_CMD;
5318 cmd->len = 4;
5319 cmd++;
5320
5321 /* Kick the execute command */
5322 cmd->cmd = 0;
5323 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5324 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5325 cmd->len = 4;
5326 cmd++;
5327
5328 /* Block on data ready, and read the status register */
5329 cmd->cmd = SRC_CRCI_NAND_DATA;
5330 cmd->src = MSM_NAND_SFLASHC_STATUS;
5331 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[7]);
5332 cmd->len = 4;
5333 cmd++;
5334
5335 /*************************************************************/
5336 /* Read necessary status registers from the onenand device */
5337 /*************************************************************/
5338
5339 /* Block on cmd ready and write CMD register */
5340 cmd->cmd = DST_CRCI_NAND_CMD;
5341 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[8]);
5342 cmd->dst = MSM_NAND_SFLASHC_CMD;
5343 cmd->len = 4;
5344 cmd++;
5345
5346 /* Kick the execute command */
5347 cmd->cmd = 0;
5348 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5349 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5350 cmd->len = 4;
5351 cmd++;
5352
5353 /* Block on data ready, and read the status register */
5354 cmd->cmd = SRC_CRCI_NAND_DATA;
5355 cmd->src = MSM_NAND_SFLASHC_STATUS;
5356 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[8]);
5357 cmd->len = 4;
5358 cmd++;
5359
5360 /* Read the GENP3 register */
5361 cmd->cmd = 0;
5362 cmd->src = MSM_NAND_GENP_REG3;
5363 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
5364 cmd->len = 4;
5365 cmd++;
5366
5367 /* Read the DEVCMD4 register */
5368 cmd->cmd = 0;
5369 cmd->src = MSM_NAND_DEV_CMD4;
5370 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
5371 cmd->len = 4;
5372 cmd++;
5373
5374 /*************************************************************/
5375 /* Restore the necessary registers to proper values */
5376 /*************************************************************/
5377
5378 /* Block on cmd ready and write CMD register */
5379 cmd->cmd = DST_CRCI_NAND_CMD;
5380 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[9]);
5381 cmd->dst = MSM_NAND_SFLASHC_CMD;
5382 cmd->len = 4;
5383 cmd++;
5384
5385 /* Kick the execute command */
5386 cmd->cmd = 0;
5387 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5388 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5389 cmd->len = 4;
5390 cmd++;
5391
5392 /* Block on data ready, and read the status register */
5393 cmd->cmd = SRC_CRCI_NAND_DATA;
5394 cmd->src = MSM_NAND_SFLASHC_STATUS;
5395 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[9]);
5396 cmd->len = 4;
5397 cmd++;
5398
5399
5400 BUILD_BUG_ON(53 != ARRAY_SIZE(dma_buffer->cmd));
5401 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
5402 dma_buffer->cmd[0].cmd |= CMD_OCB;
5403 cmd[-1].cmd |= CMD_OCU | CMD_LC;
5404
5405 dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
5406 >> 3) | CMD_PTR_LP;
5407
5408 mb();
5409		msm_dmov_exec_cmd(chip->dma_channel,
5410			DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
5411 &dma_buffer->cmdptr)));
5412 mb();
5413
5414 ecc_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
5415 interrupt_status = (dma_buffer->data.data4 >> 0)&0x0000FFFF;
5416 controller_status = (dma_buffer->data.data4 >> 16)&0x0000FFFF;
5417
5418#if VERBOSE
5419 pr_info("\n%s: sflash status %x %x %x %x %x %x %x"
5420 " %x %x %x\n", __func__,
5421 dma_buffer->data.sfstat[0],
5422 dma_buffer->data.sfstat[1],
5423 dma_buffer->data.sfstat[2],
5424 dma_buffer->data.sfstat[3],
5425 dma_buffer->data.sfstat[4],
5426 dma_buffer->data.sfstat[5],
5427 dma_buffer->data.sfstat[6],
5428 dma_buffer->data.sfstat[7],
5429 dma_buffer->data.sfstat[8],
5430 dma_buffer->data.sfstat[9]);
5431
5432 pr_info("%s: controller_status = %x\n", __func__,
5433 controller_status);
5434 pr_info("%s: interrupt_status = %x\n", __func__,
5435 interrupt_status);
5436 pr_info("%s: ecc_status = %x\n", __func__,
5437 ecc_status);
5438#endif
5439 /* Check for errors, protection violations etc */
5440 if ((controller_status != 0)
5441 || (dma_buffer->data.sfstat[0] & 0x110)
5442 || (dma_buffer->data.sfstat[6] & 0x110)
5443 || (dma_buffer->data.sfstat[7] & 0x110)
5444 || (dma_buffer->data.sfstat[8] & 0x110)
5445 || (dma_buffer->data.sfstat[9] & 0x110)
5446 || ((dma_buffer->data.sfstat[1] & 0x110) &&
5447 (ops->datbuf))
5448 || ((dma_buffer->data.sfstat[2] & 0x110) &&
5449 (ops->datbuf))
5450 || ((dma_buffer->data.sfstat[3] & 0x110) &&
5451 (ops->datbuf))
5452 || ((dma_buffer->data.sfstat[4] & 0x110) &&
5453 (ops->datbuf))
5454 || ((dma_buffer->data.sfstat[5] & 0x110) &&
5455 ((ops->oobbuf)
5456				|| (ops->mode == MTD_OPS_RAW)))) {
5457			pr_info("%s: ECC/MPU/OP error\n", __func__);
5458 err = -EIO;
5459 }
5460
5461 if (err)
5462 break;
5463 pages_written++;
5464 to_curr += mtd->writesize;
5465 }
5466
5467 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
5468
5469 dma_unmap_page(chip->dev, init_dma_addr, 64, DMA_TO_DEVICE);
5470
5471err_dma_map_initbuf_failed:
5472 if (ops->oobbuf) {
5473 dma_unmap_page(chip->dev, oob_dma_addr, ops->ooblen,
5474 DMA_TO_DEVICE);
5475 }
5476err_dma_map_oobbuf_failed:
5477 if (ops->datbuf) {
5478 dma_unmap_page(chip->dev, data_dma_addr, ops->len,
5479 DMA_TO_DEVICE);
5480 }
5481
5482 if (err) {
5483 pr_err("%s: %llx %x %x failed\n", __func__, to_curr,
5484 ops->datbuf ? ops->len : 0, ops->ooblen);
5485 } else {
5486 ops->retlen = ops->oobretlen = 0;
5487 if (ops->datbuf != NULL) {
5488			if (ops->mode != MTD_OPS_RAW)
5489				ops->retlen = mtd->writesize * pages_written;
5490 else
5491 ops->retlen = (mtd->writesize + mtd->oobsize)
5492 * pages_written;
5493 }
5494 if (ops->oobbuf != NULL) {
5495			if (ops->mode == MTD_OPS_AUTO_OOB)
5496				ops->oobretlen = mtd->oobavail * pages_written;
5497 else
5498 ops->oobretlen = mtd->oobsize * pages_written;
5499 }
5500 }
5501
5502#if VERBOSE
5503 pr_info("\n%s: ret %d, retlen %d oobretlen %d\n",
5504 __func__, err, ops->retlen, ops->oobretlen);
5505
5506 pr_info("================================================="
5507 "================\n");
5508#endif
5509 kfree(init_spare_bytes);
5510 return err;
5511}
5512
5513static int msm_onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
5514 size_t *retlen, const u_char *buf)
5515{
5516 int ret;
5517 struct mtd_oob_ops ops;
5518
5519	ops.mode = MTD_OPS_PLACE_OOB;
5520	ops.datbuf = (uint8_t *)buf;
5521 ops.len = len;
5522 ops.retlen = 0;
5523 ops.oobbuf = NULL;
5524 ops.ooblen = 0;
5525 ops.oobretlen = 0;
5526 ret = msm_onenand_write_oob(mtd, to, &ops);
5527 *retlen = ops.retlen;
5528
5529 return ret;
5530}
5531
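/*
 * Caller-side sketch (an assumption, not part of this driver): users reach
 * msm_onenand_write() through the MTD core wrapper rather than calling it
 * directly. 'part_mtd', 'offset' and 'payload' are made-up example names.
 *
 *	size_t written = 0;
 *	int rc = mtd_write(part_mtd, offset, sizeof(payload), &written,
 *			   payload);
 *	if (rc || written != sizeof(payload))
 *		pr_err("onenand write failed, rc=%d\n", rc);
 *
 * mtd_write() dispatches to ->_write, i.e. msm_onenand_write() above, which
 * wraps the request in a MTD_OPS_PLACE_OOB call to msm_onenand_write_oob().
 */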
5532static int msm_onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
5533{
5534 struct msm_nand_chip *chip = mtd->priv;
5535
5536 struct {
5537 dmov_s cmd[20];
5538 unsigned cmdptr;
5539 struct {
5540 uint32_t sfbcfg;
5541 uint32_t sfcmd[4];
5542 uint32_t sfexec;
5543 uint32_t sfstat[4];
5544 uint32_t addr0;
5545 uint32_t addr1;
5546 uint32_t addr2;
5547 uint32_t addr3;
5548 uint32_t addr4;
5549 uint32_t addr5;
5550 uint32_t addr6;
5551 uint32_t data0;
5552 uint32_t data1;
5553 uint32_t data2;
5554 uint32_t data3;
5555 uint32_t data4;
5556 uint32_t data5;
5557 uint32_t data6;
5558 } data;
5559 } *dma_buffer;
5560 dmov_s *cmd;
5561
5562 int err = 0;
5563
5564 uint16_t onenand_startaddr1;
5565 uint16_t onenand_startaddr8;
5566 uint16_t onenand_startaddr2;
5567 uint16_t onenand_startbuffer;
5568
5569 uint16_t controller_status;
5570 uint16_t interrupt_status;
5571 uint16_t ecc_status;
5572
5573 uint64_t temp;
5574
5575#if VERBOSE
5576 pr_info("================================================="
5577 "================\n");
5578 pr_info("%s: addr 0x%llx len 0x%llx\n",
5579 __func__, instr->addr, instr->len);
5580#endif
5581 if (instr->addr & (mtd->erasesize - 1)) {
5582 pr_err("%s: Unsupported erase address, 0x%llx\n",
5583 __func__, instr->addr);
5584 return -EINVAL;
5585 }
5586 if (instr->len != mtd->erasesize) {
5587 pr_err("%s: Unsupported erase len, %lld\n",
5588 __func__, instr->len);
5589 return -EINVAL;
5590 }
5591
5592 wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
5593 (chip, sizeof(*dma_buffer))));
5594
5595 cmd = dma_buffer->cmd;
5596
5597 temp = instr->addr;
5598
5599 if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
5600 && (temp >= (mtd->size>>1))) { /* DDP Device */
5601 onenand_startaddr1 = DEVICE_FLASHCORE_1 |
5602 (((uint32_t)(temp-(mtd->size>>1))
5603 / mtd->erasesize));
5604 onenand_startaddr2 = DEVICE_BUFFERRAM_1;
5605 } else {
5606 onenand_startaddr1 = DEVICE_FLASHCORE_0 |
5607 ((uint32_t)temp / mtd->erasesize) ;
5608 onenand_startaddr2 = DEVICE_BUFFERRAM_0;
5609 }
5610
5611 onenand_startaddr8 = 0x0000;
5612 onenand_startbuffer = DATARAM0_0 << 8;
5613
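	/*
	 * Example of the address split above (values are illustrative): on a
	 * dual-die (DDP) part with mtd->size = 256 MiB and erasesize = 128 KiB,
	 * an erase at 0x8020000 falls in the upper half, so
	 *   onenand_startaddr1 = DEVICE_FLASHCORE_1 |
	 *	((0x8020000 - 0x8000000) / 0x20000) = DEVICE_FLASHCORE_1 | 1
	 * while an erase at 0x20000 stays on flash core 0, block 1.
	 */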
5614 dma_buffer->data.sfbcfg = SFLASH_BCFG |
5615 (nand_sfcmd_mode ? 0 : (1 << 24));
5616 dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
5617 MSM_NAND_SFCMD_CMDXS,
5618 nand_sfcmd_mode,
5619 MSM_NAND_SFCMD_REGWR);
5620 dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
5621 MSM_NAND_SFCMD_CMDXS,
5622 nand_sfcmd_mode,
5623 MSM_NAND_SFCMD_INTHI);
5624 dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
5625 MSM_NAND_SFCMD_DATXS,
5626 nand_sfcmd_mode,
5627 MSM_NAND_SFCMD_REGRD);
5628 dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
5629 MSM_NAND_SFCMD_CMDXS,
5630 nand_sfcmd_mode,
5631 MSM_NAND_SFCMD_REGWR);
5632 dma_buffer->data.sfexec = 1;
5633 dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
5634 dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
5635 dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
5636 dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
5637 dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
5638 (ONENAND_SYSTEM_CONFIG_1);
5639 dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
5640 (ONENAND_START_ADDRESS_1);
5641 dma_buffer->data.addr2 = (ONENAND_START_BUFFER << 16) |
5642 (ONENAND_START_ADDRESS_2);
5643 dma_buffer->data.addr3 = (ONENAND_ECC_STATUS << 16) |
5644 (ONENAND_COMMAND);
5645 dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
5646 (ONENAND_INTERRUPT_STATUS);
5647 dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
5648 (ONENAND_SYSTEM_CONFIG_1);
5649 dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
5650 (ONENAND_START_ADDRESS_1);
5651 dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
5652 (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
5653 dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
5654 (onenand_startaddr1);
5655 dma_buffer->data.data2 = (onenand_startbuffer << 16) |
5656 (onenand_startaddr2);
5657 dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
5658 (ONENAND_CMDERAS);
5659 dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
5660 (CLEAN_DATA_16);
5661 dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
5662 (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
5663 dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
5664 (ONENAND_STARTADDR1_RES);
5665
5666 /***************************************************************/
5667 /* Write the necessary address registers in the onenand device */
5668 /***************************************************************/
5669
5670 /* Enable and configure the SFlash controller */
5671 cmd->cmd = 0;
5672 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
5673 cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
5674 cmd->len = 4;
5675 cmd++;
5676
5677 /* Block on cmd ready and write CMD register */
5678 cmd->cmd = DST_CRCI_NAND_CMD;
5679 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
5680 cmd->dst = MSM_NAND_SFLASHC_CMD;
5681 cmd->len = 4;
5682 cmd++;
5683
5684 /* Write the ADDR0 and ADDR1 registers */
5685 cmd->cmd = 0;
5686 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
5687 cmd->dst = MSM_NAND_ADDR0;
5688 cmd->len = 8;
5689 cmd++;
5690
5691 /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
5692 cmd->cmd = 0;
5693 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
5694 cmd->dst = MSM_NAND_ADDR2;
5695 cmd->len = 16;
5696 cmd++;
5697
5698 /* Write the ADDR6 registers */
5699 cmd->cmd = 0;
5700 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
5701 cmd->dst = MSM_NAND_ADDR6;
5702 cmd->len = 4;
5703 cmd++;
5704
5705 /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
5706 cmd->cmd = 0;
5707 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
5708 cmd->dst = MSM_NAND_GENP_REG0;
5709 cmd->len = 16;
5710 cmd++;
5711
5712 /* Write the FLASH_DEV_CMD4,5,6 registers */
5713 cmd->cmd = 0;
5714 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
5715 cmd->dst = MSM_NAND_DEV_CMD4;
5716 cmd->len = 12;
5717 cmd++;
5718
5719 /* Kick the execute command */
5720 cmd->cmd = 0;
5721 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5722 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5723 cmd->len = 4;
5724 cmd++;
5725
5726 /* Block on data ready, and read the status register */
5727 cmd->cmd = SRC_CRCI_NAND_DATA;
5728 cmd->src = MSM_NAND_SFLASHC_STATUS;
5729 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
5730 cmd->len = 4;
5731 cmd++;
5732
5733 /***************************************************************/
5734 /* Wait for the interrupt from the Onenand device controller */
5735 /***************************************************************/
5736
5737 /* Block on cmd ready and write CMD register */
5738 cmd->cmd = DST_CRCI_NAND_CMD;
5739 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
5740 cmd->dst = MSM_NAND_SFLASHC_CMD;
5741 cmd->len = 4;
5742 cmd++;
5743
5744 /* Kick the execute command */
5745 cmd->cmd = 0;
5746 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5747 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5748 cmd->len = 4;
5749 cmd++;
5750
5751 /* Block on data ready, and read the status register */
5752 cmd->cmd = SRC_CRCI_NAND_DATA;
5753 cmd->src = MSM_NAND_SFLASHC_STATUS;
5754 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
5755 cmd->len = 4;
5756 cmd++;
5757
5758 /***************************************************************/
5759 /* Read the necessary status registers from the onenand device */
5760 /***************************************************************/
5761
5762 /* Block on cmd ready and write CMD register */
5763 cmd->cmd = DST_CRCI_NAND_CMD;
5764 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
5765 cmd->dst = MSM_NAND_SFLASHC_CMD;
5766 cmd->len = 4;
5767 cmd++;
5768
5769 /* Kick the execute command */
5770 cmd->cmd = 0;
5771 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5772 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5773 cmd->len = 4;
5774 cmd++;
5775
5776 /* Block on data ready, and read the status register */
5777 cmd->cmd = SRC_CRCI_NAND_DATA;
5778 cmd->src = MSM_NAND_SFLASHC_STATUS;
5779 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
5780 cmd->len = 4;
5781 cmd++;
5782
5783 /* Read the GENP3 register */
5784 cmd->cmd = 0;
5785 cmd->src = MSM_NAND_GENP_REG3;
5786 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
5787 cmd->len = 4;
5788 cmd++;
5789
5790 /* Read the DEVCMD4 register */
5791 cmd->cmd = 0;
5792 cmd->src = MSM_NAND_DEV_CMD4;
5793 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
5794 cmd->len = 4;
5795 cmd++;
5796
5797 /***************************************************************/
5798 /* Restore the necessary registers to proper values */
5799 /***************************************************************/
5800
5801 /* Block on cmd ready and write CMD register */
5802 cmd->cmd = DST_CRCI_NAND_CMD;
5803 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
5804 cmd->dst = MSM_NAND_SFLASHC_CMD;
5805 cmd->len = 4;
5806 cmd++;
5807
5808 /* Kick the execute command */
5809 cmd->cmd = 0;
5810 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
5811 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
5812 cmd->len = 4;
5813 cmd++;
5814
5815 /* Block on data ready, and read the status register */
5816 cmd->cmd = SRC_CRCI_NAND_DATA;
5817 cmd->src = MSM_NAND_SFLASHC_STATUS;
5818 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
5819 cmd->len = 4;
5820 cmd++;
5821
5822
5823 BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
5824 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
5825 dma_buffer->cmd[0].cmd |= CMD_OCB;
5826 cmd[-1].cmd |= CMD_OCU | CMD_LC;
5827
5828 dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
5829 >> 3) | CMD_PTR_LP;
5830
5831 mb();
Jeff Ohlsteindc39f972011-09-02 13:55:16 -07005832 msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005833 | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
5834 &dma_buffer->cmdptr)));
5835 mb();
5836
5837 ecc_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
5838 interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
5839 controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
5840
5841#if VERBOSE
5842 pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
5843 dma_buffer->data.sfstat[0],
5844 dma_buffer->data.sfstat[1],
5845 dma_buffer->data.sfstat[2],
5846 dma_buffer->data.sfstat[3]);
5847
5848 pr_info("%s: controller_status = %x\n", __func__,
5849 controller_status);
5850 pr_info("%s: interrupt_status = %x\n", __func__,
5851 interrupt_status);
5852 pr_info("%s: ecc_status = %x\n", __func__,
5853 ecc_status);
5854#endif
5855 /* Check for errors, protection violations etc */
5856 if ((controller_status != 0)
5857 || (dma_buffer->data.sfstat[0] & 0x110)
5858 || (dma_buffer->data.sfstat[1] & 0x110)
5859 || (dma_buffer->data.sfstat[2] & 0x110)
5860 || (dma_buffer->data.sfstat[3] & 0x110)) {
5861 pr_err("%s: ECC/MPU/OP error\n", __func__);
5862 err = -EIO;
5863 }
5864
5865 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
5866
5867 if (err) {
5868 pr_err("%s: Erase failed, 0x%llx\n", __func__,
5869 instr->addr);
5870 instr->fail_addr = instr->addr;
5871 instr->state = MTD_ERASE_FAILED;
5872 } else {
5873 instr->state = MTD_ERASE_DONE;
5874 instr->fail_addr = 0xffffffff;
5875 mtd_erase_callback(instr);
5876 }
5877
5878#if VERBOSE
5879 pr_info("\n%s: ret %d\n", __func__, err);
5880 pr_info("===================================================="
5881 "=============\n");
5882#endif
5883 return err;
5884}
5885
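/*
 * Caller-side sketch (assumption; 'blk' is a made-up block index): erases go
 * through the MTD core, one erase block at a time for this driver:
 *
 *	struct erase_info ei = {
 *		.mtd  = mtd,
 *		.addr = (loff_t)blk * mtd->erasesize,
 *		.len  = mtd->erasesize,
 *	};
 *	int rc = mtd_erase(mtd, &ei);
 *
 * mtd_erase() lands in msm_onenand_erase(), which rejects unaligned
 * addresses and any length other than exactly one erase block.
 */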
5886static int msm_onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
5887{
5888 struct mtd_oob_ops ops;
5889 int rval, i;
5890 int ret = 0;
5891 uint8_t *buffer;
5892 uint8_t *oobptr;
5893
5894 	if ((ofs >= mtd->size) || (ofs & (mtd->erasesize - 1))) {
5895 pr_err("%s: unsupported block address, 0x%x\n",
5896 __func__, (uint32_t)ofs);
5897 return -EINVAL;
5898 }
5899
5900 buffer = kmalloc(2112, GFP_KERNEL|GFP_DMA);
5901 	if (!buffer) {
5902 pr_err("%s: Could not kmalloc for buffer\n",
5903 __func__);
5904 return -ENOMEM;
5905 }
5906
5907 memset(buffer, 0x00, 2112);
5908 oobptr = &(buffer[2048]);
5909
Steve Mucklef132c6c2012-06-06 18:30:57 -07005910 ops.mode = MTD_OPS_RAW;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005911 ops.len = 2112;
5912 ops.retlen = 0;
5913 ops.ooblen = 0;
5914 ops.oobretlen = 0;
5915 ops.ooboffs = 0;
5916 ops.datbuf = buffer;
5917 ops.oobbuf = NULL;
5918
5919 for (i = 0; i < 2; i++) {
5920 ofs = ofs + i*mtd->writesize;
5921 rval = msm_onenand_read_oob(mtd, ofs, &ops);
5922 if (rval) {
5923 pr_err("%s: Error in reading bad blk info\n",
5924 __func__);
5925 ret = rval;
5926 break;
5927 }
5928 if ((oobptr[0] != 0xFF) || (oobptr[1] != 0xFF) ||
5929 (oobptr[16] != 0xFF) || (oobptr[17] != 0xFF) ||
5930 (oobptr[32] != 0xFF) || (oobptr[33] != 0xFF) ||
5931 (oobptr[48] != 0xFF) || (oobptr[49] != 0xFF)
5932 ) {
5933 ret = 1;
5934 break;
5935 }
5936 }
5937
5938 kfree(buffer);
5939
5940#if VERBOSE
5941 if (ret == 1)
5942 pr_info("%s : Block containing 0x%x is bad\n",
5943 __func__, (unsigned int)ofs);
5944#endif
5945 return ret;
5946}
5947
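/*
 * Bad-block convention used above: the first two pages of the block are read
 * raw and the block is reported bad as soon as the first two bytes of any
 * 16-byte spare segment (offsets 0/1, 16/17, 32/33, 48/49) differ from 0xFF;
 * a factory-marked bad block typically carries 0x00 there. Callers normally
 * reach this through mtd_block_isbad(mtd, ofs), which dispatches to
 * ->_block_isbad.
 */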
5948static int msm_onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
5949{
5950 struct mtd_oob_ops ops;
5951 int rval, i;
5952 int ret = 0;
5953 uint8_t *buffer;
5954
5955 	if ((ofs >= mtd->size) || (ofs & (mtd->erasesize - 1))) {
5956 pr_err("%s: unsupported block address, 0x%x\n",
5957 __func__, (uint32_t)ofs);
5958 return -EINVAL;
5959 }
5960
5961 buffer = page_address(ZERO_PAGE());
5962
Steve Mucklef132c6c2012-06-06 18:30:57 -07005963 ops.mode = MTD_OPS_RAW;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005964 ops.len = 2112;
5965 ops.retlen = 0;
5966 ops.ooblen = 0;
5967 ops.oobretlen = 0;
5968 ops.ooboffs = 0;
5969 ops.datbuf = buffer;
5970 ops.oobbuf = NULL;
5971
5972 for (i = 0; i < 2; i++) {
5973 ofs = ofs + i*mtd->writesize;
5974 rval = msm_onenand_write_oob(mtd, ofs, &ops);
5975 if (rval) {
5976 pr_err("%s: Error in writing bad blk info\n",
5977 __func__);
5978 ret = rval;
5979 break;
5980 }
5981 }
5982
5983 return ret;
5984}
5985
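/*
 * Marking works by writing an all-zero raw page (data and spare both taken
 * from ZERO_PAGE()) over the first two pages of the block, so the spare bytes
 * checked by msm_onenand_block_isbad() are no longer 0xFF. Caller sketch
 * (assumption): mtd_block_markbad(mtd, ofs) dispatches here via
 * ->_block_markbad.
 */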
5986static int msm_onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
5987{
5988 struct msm_nand_chip *chip = mtd->priv;
5989
5990 struct {
5991 dmov_s cmd[20];
5992 unsigned cmdptr;
5993 struct {
5994 uint32_t sfbcfg;
5995 uint32_t sfcmd[4];
5996 uint32_t sfexec;
5997 uint32_t sfstat[4];
5998 uint32_t addr0;
5999 uint32_t addr1;
6000 uint32_t addr2;
6001 uint32_t addr3;
6002 uint32_t addr4;
6003 uint32_t addr5;
6004 uint32_t addr6;
6005 uint32_t data0;
6006 uint32_t data1;
6007 uint32_t data2;
6008 uint32_t data3;
6009 uint32_t data4;
6010 uint32_t data5;
6011 uint32_t data6;
6012 } data;
6013 } *dma_buffer;
6014 dmov_s *cmd;
6015
6016 int err = 0;
6017
6018 uint16_t onenand_startaddr1;
6019 uint16_t onenand_startaddr8;
6020 uint16_t onenand_startaddr2;
6021 uint16_t onenand_startblock;
6022
6023 uint16_t controller_status;
6024 uint16_t interrupt_status;
6025 uint16_t write_prot_status;
6026
6027 uint64_t start_ofs;
6028
6029#if VERBOSE
6030 pr_info("===================================================="
6031 "=============\n");
6032 pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
6033#endif
6034 /* 'ofs' & 'len' should align to block size */
6035 if (ofs&(mtd->erasesize - 1)) {
6036 pr_err("%s: Unsupported ofs address, 0x%llx\n",
6037 __func__, ofs);
6038 return -EINVAL;
6039 }
6040
6041 if (len&(mtd->erasesize - 1)) {
6042 pr_err("%s: Unsupported len, %lld\n",
6043 __func__, len);
6044 return -EINVAL;
6045 }
6046
6047 if (ofs+len > mtd->size) {
6048 pr_err("%s: Maximum chip size exceeded\n", __func__);
6049 return -EINVAL;
6050 }
6051
6052 wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
6053 (chip, sizeof(*dma_buffer))));
6054
6055 for (start_ofs = ofs; ofs < start_ofs+len; ofs = ofs+mtd->erasesize) {
6056#if VERBOSE
6057 pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
6058#endif
6059
6060 cmd = dma_buffer->cmd;
6061 if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
6062 && (ofs >= (mtd->size>>1))) { /* DDP Device */
6063 onenand_startaddr1 = DEVICE_FLASHCORE_1 |
6064 (((uint32_t)(ofs - (mtd->size>>1))
6065 / mtd->erasesize));
6066 onenand_startaddr2 = DEVICE_BUFFERRAM_1;
6067 onenand_startblock = ((uint32_t)(ofs - (mtd->size>>1))
6068 / mtd->erasesize);
6069 } else {
6070 onenand_startaddr1 = DEVICE_FLASHCORE_0 |
6071 ((uint32_t)ofs / mtd->erasesize) ;
6072 onenand_startaddr2 = DEVICE_BUFFERRAM_0;
6073 onenand_startblock = ((uint32_t)ofs
6074 / mtd->erasesize);
6075 }
6076
6077 onenand_startaddr8 = 0x0000;
6078 dma_buffer->data.sfbcfg = SFLASH_BCFG |
6079 (nand_sfcmd_mode ? 0 : (1 << 24));
6080 dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
6081 MSM_NAND_SFCMD_CMDXS,
6082 nand_sfcmd_mode,
6083 MSM_NAND_SFCMD_REGWR);
6084 dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
6085 MSM_NAND_SFCMD_CMDXS,
6086 nand_sfcmd_mode,
6087 MSM_NAND_SFCMD_INTHI);
6088 dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
6089 MSM_NAND_SFCMD_DATXS,
6090 nand_sfcmd_mode,
6091 MSM_NAND_SFCMD_REGRD);
6092 dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
6093 MSM_NAND_SFCMD_CMDXS,
6094 nand_sfcmd_mode,
6095 MSM_NAND_SFCMD_REGWR);
6096 dma_buffer->data.sfexec = 1;
6097 dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
6098 dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
6099 dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
6100 dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
6101 dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
6102 (ONENAND_SYSTEM_CONFIG_1);
6103 dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
6104 (ONENAND_START_ADDRESS_1);
6105 dma_buffer->data.addr2 = (ONENAND_START_BLOCK_ADDRESS << 16) |
6106 (ONENAND_START_ADDRESS_2);
6107 dma_buffer->data.addr3 = (ONENAND_WRITE_PROT_STATUS << 16) |
6108 (ONENAND_COMMAND);
6109 dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
6110 (ONENAND_INTERRUPT_STATUS);
6111 dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
6112 (ONENAND_SYSTEM_CONFIG_1);
6113 dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
6114 (ONENAND_START_ADDRESS_1);
6115 dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
6116 (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
6117 dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
6118 (onenand_startaddr1);
6119 dma_buffer->data.data2 = (onenand_startblock << 16) |
6120 (onenand_startaddr2);
6121 dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
6122 (ONENAND_CMD_UNLOCK);
6123 dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
6124 (CLEAN_DATA_16);
6125 dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
6126 (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
6127 dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
6128 (ONENAND_STARTADDR1_RES);
6129
6130 /*************************************************************/
6131 /* Write the necessary address reg in the onenand device */
6132 /*************************************************************/
6133
6134 /* Enable and configure the SFlash controller */
6135 cmd->cmd = 0;
6136 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
6137 cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
6138 cmd->len = 4;
6139 cmd++;
6140
6141 /* Block on cmd ready and write CMD register */
6142 cmd->cmd = DST_CRCI_NAND_CMD;
6143 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
6144 cmd->dst = MSM_NAND_SFLASHC_CMD;
6145 cmd->len = 4;
6146 cmd++;
6147
6148 /* Write the ADDR0 and ADDR1 registers */
6149 cmd->cmd = 0;
6150 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
6151 cmd->dst = MSM_NAND_ADDR0;
6152 cmd->len = 8;
6153 cmd++;
6154
6155 /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
6156 cmd->cmd = 0;
6157 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
6158 cmd->dst = MSM_NAND_ADDR2;
6159 cmd->len = 16;
6160 cmd++;
6161
6162 /* Write the ADDR6 registers */
6163 cmd->cmd = 0;
6164 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
6165 cmd->dst = MSM_NAND_ADDR6;
6166 cmd->len = 4;
6167 cmd++;
6168
6169 /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
6170 cmd->cmd = 0;
6171 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
6172 cmd->dst = MSM_NAND_GENP_REG0;
6173 cmd->len = 16;
6174 cmd++;
6175
6176 /* Write the FLASH_DEV_CMD4,5,6 registers */
6177 cmd->cmd = 0;
6178 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
6179 cmd->dst = MSM_NAND_DEV_CMD4;
6180 cmd->len = 12;
6181 cmd++;
6182
6183 /* Kick the execute command */
6184 cmd->cmd = 0;
6185 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6186 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6187 cmd->len = 4;
6188 cmd++;
6189
6190 /* Block on data ready, and read the status register */
6191 cmd->cmd = SRC_CRCI_NAND_DATA;
6192 cmd->src = MSM_NAND_SFLASHC_STATUS;
6193 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
6194 cmd->len = 4;
6195 cmd++;
6196
6197 /*************************************************************/
6198 /* Wait for the interrupt from the Onenand device controller */
6199 /*************************************************************/
6200
6201 /* Block on cmd ready and write CMD register */
6202 cmd->cmd = DST_CRCI_NAND_CMD;
6203 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
6204 cmd->dst = MSM_NAND_SFLASHC_CMD;
6205 cmd->len = 4;
6206 cmd++;
6207
6208 /* Kick the execute command */
6209 cmd->cmd = 0;
6210 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6211 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6212 cmd->len = 4;
6213 cmd++;
6214
6215 /* Block on data ready, and read the status register */
6216 cmd->cmd = SRC_CRCI_NAND_DATA;
6217 cmd->src = MSM_NAND_SFLASHC_STATUS;
6218 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
6219 cmd->len = 4;
6220 cmd++;
6221
6222 /*********************************************************/
6223 /* Read the necessary status reg from the onenand device */
6224 /*********************************************************/
6225
6226 /* Block on cmd ready and write CMD register */
6227 cmd->cmd = DST_CRCI_NAND_CMD;
6228 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
6229 cmd->dst = MSM_NAND_SFLASHC_CMD;
6230 cmd->len = 4;
6231 cmd++;
6232
6233 /* Kick the execute command */
6234 cmd->cmd = 0;
6235 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6236 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6237 cmd->len = 4;
6238 cmd++;
6239
6240 /* Block on data ready, and read the status register */
6241 cmd->cmd = SRC_CRCI_NAND_DATA;
6242 cmd->src = MSM_NAND_SFLASHC_STATUS;
6243 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
6244 cmd->len = 4;
6245 cmd++;
6246
6247 /* Read the GENP3 register */
6248 cmd->cmd = 0;
6249 cmd->src = MSM_NAND_GENP_REG3;
6250 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
6251 cmd->len = 4;
6252 cmd++;
6253
6254 /* Read the DEVCMD4 register */
6255 cmd->cmd = 0;
6256 cmd->src = MSM_NAND_DEV_CMD4;
6257 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
6258 cmd->len = 4;
6259 cmd++;
6260
6261 /************************************************************/
6262 /* Restore the necessary registers to proper values */
6263 /************************************************************/
6264
6265 /* Block on cmd ready and write CMD register */
6266 cmd->cmd = DST_CRCI_NAND_CMD;
6267 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
6268 cmd->dst = MSM_NAND_SFLASHC_CMD;
6269 cmd->len = 4;
6270 cmd++;
6271
6272 /* Kick the execute command */
6273 cmd->cmd = 0;
6274 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6275 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6276 cmd->len = 4;
6277 cmd++;
6278
6279 /* Block on data ready, and read the status register */
6280 cmd->cmd = SRC_CRCI_NAND_DATA;
6281 cmd->src = MSM_NAND_SFLASHC_STATUS;
6282 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
6283 cmd->len = 4;
6284 cmd++;
6285
6286
6287 BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
6288 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
6289 dma_buffer->cmd[0].cmd |= CMD_OCB;
6290 cmd[-1].cmd |= CMD_OCU | CMD_LC;
6291
6292 dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
6293 >> 3) | CMD_PTR_LP;
6294
6295 mb();
Jeff Ohlsteindc39f972011-09-02 13:55:16 -07006296 msm_dmov_exec_cmd(chip->dma_channel,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006297 DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
6298 &dma_buffer->cmdptr)));
6299 mb();
6300
6301 write_prot_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
6302 interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
6303 controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
6304
6305#if VERBOSE
6306 pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
6307 dma_buffer->data.sfstat[0],
6308 dma_buffer->data.sfstat[1],
6309 dma_buffer->data.sfstat[2],
6310 dma_buffer->data.sfstat[3]);
6311
6312 pr_info("%s: controller_status = %x\n", __func__,
6313 controller_status);
6314 pr_info("%s: interrupt_status = %x\n", __func__,
6315 interrupt_status);
6316 pr_info("%s: write_prot_status = %x\n", __func__,
6317 write_prot_status);
6318#endif
6319 /* Check for errors, protection violations etc */
6320 if ((controller_status != 0)
6321 || (dma_buffer->data.sfstat[0] & 0x110)
6322 || (dma_buffer->data.sfstat[1] & 0x110)
6323 || (dma_buffer->data.sfstat[2] & 0x110)
6324 || (dma_buffer->data.sfstat[3] & 0x110)) {
6325 pr_err("%s: ECC/MPU/OP error\n", __func__);
6326 err = -EIO;
6327 }
6328
6329 if (!(write_prot_status & ONENAND_WP_US)) {
6330 pr_err("%s: Unexpected status ofs = 0x%llx,"
6331 "wp_status = %x\n",
6332 __func__, ofs, write_prot_status);
6333 err = -EIO;
6334 }
6335
6336 if (err)
6337 break;
6338 }
6339
6340 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
6341
6342#if VERBOSE
6343 pr_info("\n%s: ret %d\n", __func__, err);
6344 pr_info("===================================================="
6345 "=============\n");
6346#endif
6347 return err;
6348}
6349
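/*
 * Caller-side sketch (assumption: the mtd_unlock() wrapper of this kernel
 * era): the range must be block aligned, e.g.
 *
 *	int rc = mtd_unlock(mtd, 0, 16 * mtd->erasesize);
 *
 * which walks the range one erase block at a time in msm_onenand_unlock()
 * above, issuing ONENAND_CMD_UNLOCK and verifying ONENAND_WP_US per block.
 */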
6350static int msm_onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
6351{
6352 struct msm_nand_chip *chip = mtd->priv;
6353
6354 struct {
6355 dmov_s cmd[20];
6356 unsigned cmdptr;
6357 struct {
6358 uint32_t sfbcfg;
6359 uint32_t sfcmd[4];
6360 uint32_t sfexec;
6361 uint32_t sfstat[4];
6362 uint32_t addr0;
6363 uint32_t addr1;
6364 uint32_t addr2;
6365 uint32_t addr3;
6366 uint32_t addr4;
6367 uint32_t addr5;
6368 uint32_t addr6;
6369 uint32_t data0;
6370 uint32_t data1;
6371 uint32_t data2;
6372 uint32_t data3;
6373 uint32_t data4;
6374 uint32_t data5;
6375 uint32_t data6;
6376 } data;
6377 } *dma_buffer;
6378 dmov_s *cmd;
6379
6380 int err = 0;
6381
6382 uint16_t onenand_startaddr1;
6383 uint16_t onenand_startaddr8;
6384 uint16_t onenand_startaddr2;
6385 uint16_t onenand_startblock;
6386
6387 uint16_t controller_status;
6388 uint16_t interrupt_status;
6389 uint16_t write_prot_status;
6390
6391 uint64_t start_ofs;
6392
6393#if VERBOSE
6394 pr_info("===================================================="
6395 "=============\n");
6396 pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
6397#endif
6398 /* 'ofs' & 'len' should align to block size */
6399 if (ofs&(mtd->erasesize - 1)) {
6400 pr_err("%s: Unsupported ofs address, 0x%llx\n",
6401 __func__, ofs);
6402 return -EINVAL;
6403 }
6404
6405 if (len&(mtd->erasesize - 1)) {
6406 pr_err("%s: Unsupported len, %lld\n",
6407 __func__, len);
6408 return -EINVAL;
6409 }
6410
6411 if (ofs+len > mtd->size) {
6412 pr_err("%s: Maximum chip size exceeded\n", __func__);
6413 return -EINVAL;
6414 }
6415
6416 wait_event(chip->wait_queue, (dma_buffer = msm_nand_get_dma_buffer
6417 (chip, sizeof(*dma_buffer))));
6418
6419 for (start_ofs = ofs; ofs < start_ofs+len; ofs = ofs+mtd->erasesize) {
6420#if VERBOSE
6421 pr_info("%s: ofs 0x%llx len %lld\n", __func__, ofs, len);
6422#endif
6423
6424 cmd = dma_buffer->cmd;
6425 if ((onenand_info.device_id & ONENAND_DEVICE_IS_DDP)
6426 && (ofs >= (mtd->size>>1))) { /* DDP Device */
6427 onenand_startaddr1 = DEVICE_FLASHCORE_1 |
6428 (((uint32_t)(ofs - (mtd->size>>1))
6429 / mtd->erasesize));
6430 onenand_startaddr2 = DEVICE_BUFFERRAM_1;
6431 onenand_startblock = ((uint32_t)(ofs - (mtd->size>>1))
6432 / mtd->erasesize);
6433 } else {
6434 onenand_startaddr1 = DEVICE_FLASHCORE_0 |
6435 ((uint32_t)ofs / mtd->erasesize) ;
6436 onenand_startaddr2 = DEVICE_BUFFERRAM_0;
6437 onenand_startblock = ((uint32_t)ofs
6438 / mtd->erasesize);
6439 }
6440
6441 onenand_startaddr8 = 0x0000;
6442 dma_buffer->data.sfbcfg = SFLASH_BCFG |
6443 (nand_sfcmd_mode ? 0 : (1 << 24));
6444 dma_buffer->data.sfcmd[0] = SFLASH_PREPCMD(7, 0, 0,
6445 MSM_NAND_SFCMD_CMDXS,
6446 nand_sfcmd_mode,
6447 MSM_NAND_SFCMD_REGWR);
6448 dma_buffer->data.sfcmd[1] = SFLASH_PREPCMD(0, 0, 32,
6449 MSM_NAND_SFCMD_CMDXS,
6450 nand_sfcmd_mode,
6451 MSM_NAND_SFCMD_INTHI);
6452 dma_buffer->data.sfcmd[2] = SFLASH_PREPCMD(3, 7, 0,
6453 MSM_NAND_SFCMD_DATXS,
6454 nand_sfcmd_mode,
6455 MSM_NAND_SFCMD_REGRD);
6456 dma_buffer->data.sfcmd[3] = SFLASH_PREPCMD(4, 10, 0,
6457 MSM_NAND_SFCMD_CMDXS,
6458 nand_sfcmd_mode,
6459 MSM_NAND_SFCMD_REGWR);
6460 dma_buffer->data.sfexec = 1;
6461 dma_buffer->data.sfstat[0] = CLEAN_DATA_32;
6462 dma_buffer->data.sfstat[1] = CLEAN_DATA_32;
6463 dma_buffer->data.sfstat[2] = CLEAN_DATA_32;
6464 dma_buffer->data.sfstat[3] = CLEAN_DATA_32;
6465 dma_buffer->data.addr0 = (ONENAND_INTERRUPT_STATUS << 16) |
6466 (ONENAND_SYSTEM_CONFIG_1);
6467 dma_buffer->data.addr1 = (ONENAND_START_ADDRESS_8 << 16) |
6468 (ONENAND_START_ADDRESS_1);
6469 dma_buffer->data.addr2 = (ONENAND_START_BLOCK_ADDRESS << 16) |
6470 (ONENAND_START_ADDRESS_2);
6471 dma_buffer->data.addr3 = (ONENAND_WRITE_PROT_STATUS << 16) |
6472 (ONENAND_COMMAND);
6473 dma_buffer->data.addr4 = (ONENAND_CONTROLLER_STATUS << 16) |
6474 (ONENAND_INTERRUPT_STATUS);
6475 dma_buffer->data.addr5 = (ONENAND_INTERRUPT_STATUS << 16) |
6476 (ONENAND_SYSTEM_CONFIG_1);
6477 dma_buffer->data.addr6 = (ONENAND_START_ADDRESS_3 << 16) |
6478 (ONENAND_START_ADDRESS_1);
6479 dma_buffer->data.data0 = (ONENAND_CLRINTR << 16) |
6480 (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
6481 dma_buffer->data.data1 = (onenand_startaddr8 << 16) |
6482 (onenand_startaddr1);
6483 dma_buffer->data.data2 = (onenand_startblock << 16) |
6484 (onenand_startaddr2);
6485 dma_buffer->data.data3 = (CLEAN_DATA_16 << 16) |
6486 (ONENAND_CMD_LOCK);
6487 dma_buffer->data.data4 = (CLEAN_DATA_16 << 16) |
6488 (CLEAN_DATA_16);
6489 dma_buffer->data.data5 = (ONENAND_CLRINTR << 16) |
6490 (ONENAND_SYSCFG1_ECCENA(nand_sfcmd_mode));
6491 dma_buffer->data.data6 = (ONENAND_STARTADDR3_RES << 16) |
6492 (ONENAND_STARTADDR1_RES);
6493
6494 /*************************************************************/
6495 /* Write the necessary address reg in the onenand device */
6496 /*************************************************************/
6497
6498 /* Enable and configure the SFlash controller */
6499 cmd->cmd = 0;
6500 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfbcfg);
6501 cmd->dst = MSM_NAND_SFLASHC_BURST_CFG;
6502 cmd->len = 4;
6503 cmd++;
6504
6505 /* Block on cmd ready and write CMD register */
6506 cmd->cmd = DST_CRCI_NAND_CMD;
6507 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[0]);
6508 cmd->dst = MSM_NAND_SFLASHC_CMD;
6509 cmd->len = 4;
6510 cmd++;
6511
6512 /* Write the ADDR0 and ADDR1 registers */
6513 cmd->cmd = 0;
6514 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr0);
6515 cmd->dst = MSM_NAND_ADDR0;
6516 cmd->len = 8;
6517 cmd++;
6518
6519 /* Write the ADDR2 ADDR3 ADDR4 ADDR5 registers */
6520 cmd->cmd = 0;
6521 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr2);
6522 cmd->dst = MSM_NAND_ADDR2;
6523 cmd->len = 16;
6524 cmd++;
6525
6526 /* Write the ADDR6 registers */
6527 cmd->cmd = 0;
6528 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.addr6);
6529 cmd->dst = MSM_NAND_ADDR6;
6530 cmd->len = 4;
6531 cmd++;
6532
6533 /* Write the GENP0, GENP1, GENP2, GENP3, GENP4 registers */
6534 cmd->cmd = 0;
6535 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data0);
6536 cmd->dst = MSM_NAND_GENP_REG0;
6537 cmd->len = 16;
6538 cmd++;
6539
6540 /* Write the FLASH_DEV_CMD4,5,6 registers */
6541 cmd->cmd = 0;
6542 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.data4);
6543 cmd->dst = MSM_NAND_DEV_CMD4;
6544 cmd->len = 12;
6545 cmd++;
6546
6547 /* Kick the execute command */
6548 cmd->cmd = 0;
6549 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6550 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6551 cmd->len = 4;
6552 cmd++;
6553
6554 /* Block on data ready, and read the status register */
6555 cmd->cmd = SRC_CRCI_NAND_DATA;
6556 cmd->src = MSM_NAND_SFLASHC_STATUS;
6557 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[0]);
6558 cmd->len = 4;
6559 cmd++;
6560
6561 /*************************************************************/
6562 /* Wait for the interrupt from the Onenand device controller */
6563 /*************************************************************/
6564
6565 /* Block on cmd ready and write CMD register */
6566 cmd->cmd = DST_CRCI_NAND_CMD;
6567 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[1]);
6568 cmd->dst = MSM_NAND_SFLASHC_CMD;
6569 cmd->len = 4;
6570 cmd++;
6571
6572 /* Kick the execute command */
6573 cmd->cmd = 0;
6574 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6575 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6576 cmd->len = 4;
6577 cmd++;
6578
6579 /* Block on data ready, and read the status register */
6580 cmd->cmd = SRC_CRCI_NAND_DATA;
6581 cmd->src = MSM_NAND_SFLASHC_STATUS;
6582 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[1]);
6583 cmd->len = 4;
6584 cmd++;
6585
6586 /*********************************************************/
6587 /* Read the necessary status reg from the onenand device */
6588 /*********************************************************/
6589
6590 /* Block on cmd ready and write CMD register */
6591 cmd->cmd = DST_CRCI_NAND_CMD;
6592 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[2]);
6593 cmd->dst = MSM_NAND_SFLASHC_CMD;
6594 cmd->len = 4;
6595 cmd++;
6596
6597 /* Kick the execute command */
6598 cmd->cmd = 0;
6599 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6600 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6601 cmd->len = 4;
6602 cmd++;
6603
6604 /* Block on data ready, and read the status register */
6605 cmd->cmd = SRC_CRCI_NAND_DATA;
6606 cmd->src = MSM_NAND_SFLASHC_STATUS;
6607 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[2]);
6608 cmd->len = 4;
6609 cmd++;
6610
6611 /* Read the GENP3 register */
6612 cmd->cmd = 0;
6613 cmd->src = MSM_NAND_GENP_REG3;
6614 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data3);
6615 cmd->len = 4;
6616 cmd++;
6617
6618 /* Read the DEVCMD4 register */
6619 cmd->cmd = 0;
6620 cmd->src = MSM_NAND_DEV_CMD4;
6621 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.data4);
6622 cmd->len = 4;
6623 cmd++;
6624
6625 /************************************************************/
6626 /* Restore the necessary registers to proper values */
6627 /************************************************************/
6628
6629 /* Block on cmd ready and write CMD register */
6630 cmd->cmd = DST_CRCI_NAND_CMD;
6631 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfcmd[3]);
6632 cmd->dst = MSM_NAND_SFLASHC_CMD;
6633 cmd->len = 4;
6634 cmd++;
6635
6636 /* Kick the execute command */
6637 cmd->cmd = 0;
6638 cmd->src = msm_virt_to_dma(chip, &dma_buffer->data.sfexec);
6639 cmd->dst = MSM_NAND_SFLASHC_EXEC_CMD;
6640 cmd->len = 4;
6641 cmd++;
6642
6643 /* Block on data ready, and read the status register */
6644 cmd->cmd = SRC_CRCI_NAND_DATA;
6645 cmd->src = MSM_NAND_SFLASHC_STATUS;
6646 cmd->dst = msm_virt_to_dma(chip, &dma_buffer->data.sfstat[3]);
6647 cmd->len = 4;
6648 cmd++;
6649
6650
6651 BUILD_BUG_ON(20 != ARRAY_SIZE(dma_buffer->cmd));
6652 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
6653 dma_buffer->cmd[0].cmd |= CMD_OCB;
6654 cmd[-1].cmd |= CMD_OCU | CMD_LC;
6655
6656 dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd)
6657 >> 3) | CMD_PTR_LP;
6658
6659 mb();
Jeff Ohlsteindc39f972011-09-02 13:55:16 -07006660 msm_dmov_exec_cmd(chip->dma_channel,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006661 DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
6662 &dma_buffer->cmdptr)));
6663 mb();
6664
6665 write_prot_status = (dma_buffer->data.data3 >> 16) & 0x0000FFFF;
6666 interrupt_status = (dma_buffer->data.data4 >> 0) & 0x0000FFFF;
6667 controller_status = (dma_buffer->data.data4 >> 16) & 0x0000FFFF;
6668
6669#if VERBOSE
6670 pr_info("\n%s: sflash status %x %x %x %x\n", __func__,
6671 dma_buffer->data.sfstat[0],
6672 dma_buffer->data.sfstat[1],
6673 dma_buffer->data.sfstat[2],
6674 dma_buffer->data.sfstat[3]);
6675
6676 pr_info("%s: controller_status = %x\n", __func__,
6677 controller_status);
6678 pr_info("%s: interrupt_status = %x\n", __func__,
6679 interrupt_status);
6680 pr_info("%s: write_prot_status = %x\n", __func__,
6681 write_prot_status);
6682#endif
6683 /* Check for errors, protection violations etc */
6684 if ((controller_status != 0)
6685 || (dma_buffer->data.sfstat[0] & 0x110)
6686 || (dma_buffer->data.sfstat[1] & 0x110)
6687 || (dma_buffer->data.sfstat[2] & 0x110)
6688 || (dma_buffer->data.sfstat[3] & 0x110)) {
6689 pr_err("%s: ECC/MPU/OP error\n", __func__);
6690 err = -EIO;
6691 }
6692
6693 if (!(write_prot_status & ONENAND_WP_LS)) {
6694 pr_err("%s: Unexpected status ofs = 0x%llx,"
6695 "wp_status = %x\n",
6696 __func__, ofs, write_prot_status);
6697 err = -EIO;
6698 }
6699
6700 if (err)
6701 break;
6702 }
6703
6704 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
6705
6706#if VERBOSE
6707 pr_info("\n%s: ret %d\n", __func__, err);
6708 pr_info("===================================================="
6709 "=============\n");
6710#endif
6711 return err;
6712}
6713
6714static int msm_onenand_suspend(struct mtd_info *mtd)
6715{
6716 return 0;
6717}
6718
6719static void msm_onenand_resume(struct mtd_info *mtd)
6720{
6721}
6722
6723int msm_onenand_scan(struct mtd_info *mtd, int maxchips)
6724{
6725 struct msm_nand_chip *chip = mtd->priv;
6726
6727 /* Probe and check whether onenand device is present */
6728 if (flash_onenand_probe(chip))
6729 return -ENODEV;
6730
6731 mtd->size = 0x1000000 << ((onenand_info.device_id & 0xF0) >> 4);
6732 mtd->writesize = onenand_info.data_buf_size;
6733 mtd->oobsize = mtd->writesize >> 5;
6734 mtd->erasesize = mtd->writesize << 6;
6735 mtd->oobavail = msm_onenand_oob_64.oobavail;
6736 mtd->ecclayout = &msm_onenand_oob_64;
6737
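	/*
	 * Geometry example for the derivation above (illustrative values):
	 * a device_id with density nibble 0x4 gives
	 *   mtd->size      = 0x1000000 << 4 = 256 MiB
	 * and with data_buf_size = 2048,
	 *   mtd->writesize = 2048
	 *   mtd->oobsize   = 2048 >> 5      = 64
	 *   mtd->erasesize = 2048 << 6      = 128 KiB
	 */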
6738 mtd->type = MTD_NANDFLASH;
6739 mtd->flags = MTD_CAP_NANDFLASH;
Steve Mucklef132c6c2012-06-06 18:30:57 -07006740 mtd->_erase = msm_onenand_erase;
6741 mtd->_point = NULL;
6742 mtd->_unpoint = NULL;
6743 mtd->_read = msm_onenand_read;
6744 mtd->_write = msm_onenand_write;
6745 mtd->_read_oob = msm_onenand_read_oob;
6746 mtd->_write_oob = msm_onenand_write_oob;
6747 mtd->_lock = msm_onenand_lock;
6748 mtd->_unlock = msm_onenand_unlock;
6749 mtd->_suspend = msm_onenand_suspend;
6750 mtd->_resume = msm_onenand_resume;
6751 mtd->_block_isbad = msm_onenand_block_isbad;
6752 mtd->_block_markbad = msm_onenand_block_markbad;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006753 mtd->owner = THIS_MODULE;
6754
6755 pr_info("Found a supported onenand device\n");
6756
6757 return 0;
6758}
6759
Sujit Reddy Thummadb273532012-11-22 09:46:12 +05306760static const unsigned int bch_sup_cntrl[] = {
6761 0x307, /* MSM7x2xA */
6762 0x4030, /* MDM 9x15 */
6763};
6764
6765static inline bool msm_nand_has_bch_ecc_engine(unsigned int hw_id)
6766{
6767 int i;
6768
6769 for (i = 0; i < ARRAY_SIZE(bch_sup_cntrl); i++) {
6770 if (hw_id == bch_sup_cntrl[i])
6771 return true;
6772 }
6773
6774 return false;
6775}
6776
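/*
 * Example: a controller whose MSM_NAND_HW_INFO register reads 0x4030 (the
 * MDM 9x15 entry above) makes this helper return true, e.g.
 *
 *	if (msm_nand_has_bch_ecc_engine(0x4030))
 *		enable_bch_ecc = 1;
 *
 * which is essentially what msm_nand_scan() does below, gated additionally
 * on the flash part advertising at least 8-bit ECC correctability.
 */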
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006777/**
6778 * msm_nand_scan - [msm_nand Interface] Scan for the msm_nand device
6779 * @param mtd MTD device structure
6780 * @param maxchips Number of chips to scan for
6781 *
6782 * This fills out all the not initialized function pointers
6783 * with the defaults.
6784 * The flash ID is read and the mtd/chip structures are
6785 * filled with the appropriate values.
6786 */
6787int msm_nand_scan(struct mtd_info *mtd, int maxchips)
6788{
6789 struct msm_nand_chip *chip = mtd->priv;
6790 uint32_t flash_id = 0, i, mtd_writesize;
6791 uint8_t dev_found = 0;
6792 uint8_t wide_bus;
6793 uint32_t manid;
6794 uint32_t devid;
6795 uint32_t devcfg;
6796 struct nand_flash_dev *flashdev = NULL;
6797 struct nand_manufacturers *flashman = NULL;
Sujit Reddy Thummadb273532012-11-22 09:46:12 +05306798 unsigned int hw_id;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006799
6800 /* Probe the Flash device for ONFI compliance */
6801 if (!flash_onfi_probe(chip)) {
6802 dev_found = 1;
6803 } else {
6804 /* Read the Flash ID from the Nand Flash Device */
6805 flash_id = flash_read_id(chip);
6806 manid = flash_id & 0xFF;
6807 devid = (flash_id >> 8) & 0xFF;
6808 devcfg = (flash_id >> 24) & 0xFF;
6809
6810 for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
6811 if (nand_manuf_ids[i].id == manid)
6812 flashman = &nand_manuf_ids[i];
6813 for (i = 0; !flashdev && nand_flash_ids[i].id; ++i)
6814 if (nand_flash_ids[i].id == devid)
6815 flashdev = &nand_flash_ids[i];
6816 if (!flashdev || !flashman) {
6817 pr_err("ERROR: unknown nand device manuf=%x devid=%x\n",
6818 manid, devid);
6819 return -ENOENT;
6820 } else
6821 dev_found = 1;
6822
6823 if (!flashdev->pagesize) {
6824 supported_flash.flash_id = flash_id;
6825 supported_flash.density = flashdev->chipsize << 20;
6826 supported_flash.widebus = devcfg & (1 << 6) ? 1 : 0;
6827 supported_flash.pagesize = 1024 << (devcfg & 0x3);
6828 supported_flash.blksize = (64 * 1024) <<
6829 ((devcfg >> 4) & 0x3);
Krishna Konda99e362f2012-02-29 21:16:34 -08006830 supported_flash.oobsize = (8 << ((devcfg >> 2) & 0x3)) *
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006831 (supported_flash.pagesize >> 9);
Krishna Kondad9d4dae52012-02-29 21:33:14 -08006832
6833 if ((supported_flash.oobsize > 64) &&
6834 (supported_flash.pagesize == 2048)) {
6835 pr_info("msm_nand: Found a 2K page device with"
6836 " %d oobsize - changing oobsize to 64 "
6837 "bytes.\n", supported_flash.oobsize);
6838 supported_flash.oobsize = 64;
6839 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006840 } else {
6841 supported_flash.flash_id = flash_id;
6842 supported_flash.density = flashdev->chipsize << 20;
6843 supported_flash.widebus = flashdev->options &
6844 NAND_BUSWIDTH_16 ? 1 : 0;
6845 supported_flash.pagesize = flashdev->pagesize;
6846 supported_flash.blksize = flashdev->erasesize;
6847 supported_flash.oobsize = flashdev->pagesize >> 5;
6848 }
6849 }
6850
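	/*
	 * Decode example for the legacy-ID branch above (devcfg value is
	 * illustrative): devcfg = 0x15 gives
	 *   pagesize = 1024 << (0x15 & 0x3)          = 2048
	 *   blksize  = 64 KiB << ((0x15 >> 4) & 0x3) = 128 KiB
	 *   oobsize  = (8 << ((0x15 >> 2) & 0x3)) * (2048 >> 9) = 16 * 4 = 64
	 *   widebus  = (0x15 >> 6) & 1               = 0 (x8 bus)
	 */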
6851 if (dev_found) {
6852 		i = !interleave_enable ? 1 : 2;
6853 wide_bus = supported_flash.widebus;
6854 mtd->size = supported_flash.density * i;
6855 mtd->writesize = supported_flash.pagesize * i;
6856 mtd->oobsize = supported_flash.oobsize * i;
6857 mtd->erasesize = supported_flash.blksize * i;
Sujit Reddy Thummaf6e83862012-05-23 13:59:57 -04006858 mtd->writebufsize = mtd->writesize;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006859
6860 if (!interleave_enable)
6861 mtd_writesize = mtd->writesize;
6862 else
6863 mtd_writesize = mtd->writesize >> 1;
6864
6865 		/* Check whether the controller and NAND device support 8-bit ECC */
Sujit Reddy Thummadb273532012-11-22 09:46:12 +05306866 hw_id = flash_rd_reg(chip, MSM_NAND_HW_INFO);
6867 if (msm_nand_has_bch_ecc_engine(hw_id)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006868 && (supported_flash.ecc_correctability >= 8)) {
6869 pr_info("Found supported NAND device for %dbit ECC\n",
6870 supported_flash.ecc_correctability);
6871 enable_bch_ecc = 1;
6872 } else {
6873 pr_info("Found a supported NAND device\n");
6874 }
Sujit Reddy Thummadb273532012-11-22 09:46:12 +05306875 pr_info("NAND Controller ID : 0x%x\n", hw_id);
6876 pr_info("NAND Device ID : 0x%x\n", supported_flash.flash_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006877 pr_info("Buswidth : %d Bits\n", (wide_bus) ? 16 : 8);
6878 pr_info("Density : %lld MByte\n", (mtd->size>>20));
6879 pr_info("Pagesize : %d Bytes\n", mtd->writesize);
6880 pr_info("Erasesize: %d Bytes\n", mtd->erasesize);
6881 pr_info("Oobsize : %d Bytes\n", mtd->oobsize);
6882 } else {
6883 pr_err("Unsupported Nand,Id: 0x%x \n", flash_id);
6884 return -ENODEV;
6885 }
6886
6887 	/* Size of each codeword is 532 bytes in case of 8-bit BCH ECC */
6888 chip->cw_size = enable_bch_ecc ? 532 : 528;
6889 chip->CFG0 = (((mtd_writesize >> 9)-1) << 6) /* 4/8 cw/pg for 2/4k */
6890 | (516 << 9) /* 516 user data bytes */
6891 | (10 << 19) /* 10 parity bytes */
6892 | (5 << 27) /* 5 address cycles */
6893 | (0 << 30) /* Do not read status before data */
6894 | (1 << 31) /* Send read cmd */
6895 /* 0 spare bytes for 16 bit nand or 1/2 spare bytes for 8 bit */
6896 | (wide_bus ? 0 << 23 : (enable_bch_ecc ? 2 << 23 : 1 << 23));
6897
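	/*
	 * Worked example for CFG0 (illustrative: 2 KiB page, x8 bus, default
	 * 4-bit RS ECC, i.e. wide_bus = 0 and enable_bch_ecc = 0):
	 *   (4 - 1) << 6   = 0x000000C0   4 codewords per page
	 *   516 << 9       = 0x00040800   user data bytes per codeword
	 *   10 << 19       = 0x00500000   parity bytes
	 *   1 << 23        = 0x00800000   spare bytes
	 *   5 << 27        = 0x28000000   address cycles
	 *   1 << 31        = 0x80000000   send read command
	 *   OR of the above = 0xA8D408C0, the "CFG0 Init" value printed later
	 *   for this configuration.
	 */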
6898 chip->CFG1 = (0 << 0) /* Enable ecc */
6899 | (7 << 2) /* 8 recovery cycles */
6900 | (0 << 5) /* Allow CS deassertion */
6901 /* Bad block marker location */
6902 | ((mtd_writesize - (chip->cw_size * (
6903 (mtd_writesize >> 9) - 1)) + 1) << 6)
6904 | (0 << 16) /* Bad block in user data area */
6905 | (2 << 17) /* 6 cycle tWB/tRB */
6906 | ((wide_bus) ? CFG1_WIDE_FLASH : 0); /* Wide flash bit */
6907
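	/*
	 * Bad-block-marker field example (same illustrative 2 KiB geometry):
	 * with RS ECC, cw_size = 528 and 4 codewords per page, so the marker
	 * byte offset programmed into CFG1 is
	 *   2048 - 528 * (4 - 1) + 1 = 465
	 * and with 8-bit BCH (cw_size = 532) it becomes
	 *   2048 - 532 * (4 - 1) + 1 = 453.
	 */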
6908 chip->ecc_buf_cfg = 0x203;
6909 chip->CFG0_RAW = 0xA80420C0;
6910 chip->CFG1_RAW = 0x5045D;
6911
6912 if (enable_bch_ecc) {
6913 chip->CFG1 |= (1 << 27); /* Enable BCH engine */
6914 chip->ecc_bch_cfg = (0 << 0) /* Enable ECC*/
6915 | (0 << 1) /* Enable/Disable SW reset of ECC engine */
6916 | (1 << 4) /* 8bit ecc*/
6917 | ((wide_bus) ? (14 << 8) : (13 << 8))/*parity bytes*/
6918 | (516 << 16) /* 516 user data bytes */
6919 | (1 << 30); /* Turn on ECC engine clocks always */
6920 chip->CFG0_RAW = 0xA80428C0; /* CW size is increased to 532B */
6921 }
6922
6923 /*
6924 * For 4bit RS ECC (default ECC), parity bytes = 10 (for x8 and x16 I/O)
6925 * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
6926 */
6927 chip->ecc_parity_bytes = enable_bch_ecc ? (wide_bus ? 14 : 13) : 10;
6928
6929 pr_info("CFG0 Init : 0x%08x\n", chip->CFG0);
6930 pr_info("CFG1 Init : 0x%08x\n", chip->CFG1);
6931 pr_info("ECCBUFCFG : 0x%08x\n", chip->ecc_buf_cfg);
6932
6933 if (mtd->oobsize == 64) {
6934 mtd->oobavail = msm_nand_oob_64.oobavail;
6935 mtd->ecclayout = &msm_nand_oob_64;
6936 } else if (mtd->oobsize == 128) {
6937 mtd->oobavail = msm_nand_oob_128.oobavail;
6938 mtd->ecclayout = &msm_nand_oob_128;
6939 } else if (mtd->oobsize == 224) {
6940 mtd->oobavail = wide_bus ? msm_nand_oob_224_x16.oobavail :
6941 msm_nand_oob_224_x8.oobavail;
6942 mtd->ecclayout = wide_bus ? &msm_nand_oob_224_x16 :
6943 &msm_nand_oob_224_x8;
6944 } else if (mtd->oobsize == 256) {
6945 mtd->oobavail = msm_nand_oob_256.oobavail;
6946 mtd->ecclayout = &msm_nand_oob_256;
6947 } else {
6948 pr_err("Unsupported Nand, oobsize: 0x%x \n",
6949 mtd->oobsize);
6950 return -ENODEV;
6951 }
6952
6953 /* Fill in remaining MTD driver data */
6954 mtd->type = MTD_NANDFLASH;
6955 mtd->flags = MTD_CAP_NANDFLASH;
6956 /* mtd->ecctype = MTD_ECC_SW; */
Steve Mucklef132c6c2012-06-06 18:30:57 -07006957 mtd->_erase = msm_nand_erase;
6958 mtd->_block_isbad = msm_nand_block_isbad;
6959 mtd->_block_markbad = msm_nand_block_markbad;
6960 mtd->_point = NULL;
6961 mtd->_unpoint = NULL;
6962 mtd->_read = msm_nand_read;
6963 mtd->_write = msm_nand_write;
6964 mtd->_read_oob = msm_nand_read_oob;
6965 mtd->_write_oob = msm_nand_write_oob;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006966 if (dual_nand_ctlr_present) {
Steve Mucklef132c6c2012-06-06 18:30:57 -07006967 mtd->_read_oob = msm_nand_read_oob_dualnandc;
6968 mtd->_write_oob = msm_nand_write_oob_dualnandc;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006969 if (interleave_enable) {
Steve Mucklef132c6c2012-06-06 18:30:57 -07006970 mtd->_erase = msm_nand_erase_dualnandc;
6971 mtd->_block_isbad = msm_nand_block_isbad_dualnandc;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006972 }
6973 }
6974
6975 /* mtd->sync = msm_nand_sync; */
Steve Mucklef132c6c2012-06-06 18:30:57 -07006976 mtd->_lock = NULL;
6977 /* mtd->_unlock = msm_nand_unlock; */
6978 mtd->_suspend = msm_nand_suspend;
6979 mtd->_resume = msm_nand_resume;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006980 mtd->owner = THIS_MODULE;
6981
6982 /* Unlock whole block */
6983 /* msm_nand_unlock_all(mtd); */
6984
6985 /* return this->scan_bbt(mtd); */
6986 return 0;
6987}
6988EXPORT_SYMBOL_GPL(msm_nand_scan);
6989
6990/**
6991 * msm_nand_release - [msm_nand Interface] Free resources held by the msm_nand device
6992 * @param mtd MTD device structure
6993 */
6994void msm_nand_release(struct mtd_info *mtd)
6995{
6996 /* struct msm_nand_chip *this = mtd->priv; */
6997
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006998 /* Deregister the device */
6999 mtd_device_unregister(mtd);
7000}
7001EXPORT_SYMBOL_GPL(msm_nand_release);
7002
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007003struct msm_nand_info {
7004 struct mtd_info mtd;
7005 struct mtd_partition *parts;
7006 struct msm_nand_chip msm_nand;
7007};
7008
7009/* duplicating the NC01 XFR contents to NC10 */
7010static int msm_nand_nc10_xfr_settings(struct mtd_info *mtd)
7011{
7012 struct msm_nand_chip *chip = mtd->priv;
7013
7014 struct {
7015 dmov_s cmd[2];
7016 unsigned cmdptr;
7017 } *dma_buffer;
7018 dmov_s *cmd;
7019
7020 wait_event(chip->wait_queue,
7021 (dma_buffer = msm_nand_get_dma_buffer(
7022 chip, sizeof(*dma_buffer))));
7023
7024 cmd = dma_buffer->cmd;
7025
7026 /* Copying XFR register contents from NC01 --> NC10 */
7027 cmd->cmd = 0;
7028 cmd->src = NC01(MSM_NAND_XFR_STEP1);
7029 cmd->dst = NC10(MSM_NAND_XFR_STEP1);
7030 cmd->len = 28;
7031 cmd++;
7032
7033 BUILD_BUG_ON(2 != ARRAY_SIZE(dma_buffer->cmd));
7034 BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
7035 dma_buffer->cmd[0].cmd |= CMD_OCB;
7036 cmd[-1].cmd |= CMD_OCU | CMD_LC;
7037 dma_buffer->cmdptr = (msm_virt_to_dma(chip, dma_buffer->cmd) >> 3)
7038 | CMD_PTR_LP;
7039
7040 mb();
Jeff Ohlsteindc39f972011-09-02 13:55:16 -07007041 msm_dmov_exec_cmd(chip->dma_channel, DMOV_CMD_PTR_LIST
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007042 | DMOV_CMD_ADDR(msm_virt_to_dma(chip,
7043 &dma_buffer->cmdptr)));
7044 mb();
7045 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
7046 return 0;
7047}
7048
Sujith Reddy Thumma5e4d9872011-08-24 09:07:55 +05307049static int setup_mtd_device(struct platform_device *pdev,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007050 struct msm_nand_info *info)
7051{
Sujith Reddy Thumma5e4d9872011-08-24 09:07:55 +05307052 int i, err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007053 struct flash_platform_data *pdata = pdev->dev.platform_data;
7054
Sujith Reddy Thumma5e4d9872011-08-24 09:07:55 +05307055 if (pdata) {
7056 for (i = 0; i < pdata->nr_parts; i++) {
7057 pdata->parts[i].offset = pdata->parts[i].offset
7058 * info->mtd.erasesize;
7059 pdata->parts[i].size = pdata->parts[i].size
7060 * info->mtd.erasesize;
7061 }
7062 err = mtd_device_register(&info->mtd, pdata->parts,
7063 pdata->nr_parts);
7064 } else {
7065 err = mtd_device_register(&info->mtd, NULL, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007066 }
Sujith Reddy Thumma5e4d9872011-08-24 09:07:55 +05307067 return err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007068}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007069
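/*
 * The partition table handed in via platform data is scaled above: each
 * parts[i].offset and parts[i].size is multiplied by the erase size, so the
 * board file apparently expresses them in erase-block units. Example
 * (illustrative numbers): with a 128 KiB erasesize, offset = 4 and size = 80
 * become 0x80000 and 0xA00000 bytes before mtd_device_register() sees them.
 */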
7070static int __devinit msm_nand_probe(struct platform_device *pdev)
7071{
7072 struct msm_nand_info *info;
7073 struct resource *res;
7074 int err;
7075 struct flash_platform_data *plat_data;
7076
7077 plat_data = pdev->dev.platform_data;
7078
7079 res = platform_get_resource_byname(pdev,
7080 IORESOURCE_MEM, "msm_nand_phys");
7081 if (!res || !res->start) {
7082 pr_err("%s: msm_nand_phys resource invalid/absent\n",
7083 __func__);
7084 return -ENODEV;
7085 }
7086 msm_nand_phys = res->start;
7087 pr_info("%s: phys addr 0x%lx \n", __func__, msm_nand_phys);
7088
7089 res = platform_get_resource_byname(pdev,
7090 IORESOURCE_MEM, "msm_nandc01_phys");
7091 if (!res || !res->start)
7092 goto no_dual_nand_ctlr_support;
7093 msm_nandc01_phys = res->start;
7094
7095 res = platform_get_resource_byname(pdev,
7096 IORESOURCE_MEM, "msm_nandc10_phys");
7097 if (!res || !res->start)
7098 goto no_dual_nand_ctlr_support;
7099 msm_nandc10_phys = res->start;
7100
7101 res = platform_get_resource_byname(pdev,
7102 IORESOURCE_MEM, "msm_nandc11_phys");
7103 if (!res || !res->start)
7104 goto no_dual_nand_ctlr_support;
7105 msm_nandc11_phys = res->start;
7106
7107 res = platform_get_resource_byname(pdev,
7108 IORESOURCE_MEM, "ebi2_reg_base");
7109 if (!res || !res->start)
7110 goto no_dual_nand_ctlr_support;
7111 ebi2_register_base = res->start;
7112
7113 dual_nand_ctlr_present = 1;
7114 if (plat_data != NULL)
7115 interleave_enable = plat_data->interleave;
7116 else
7117 interleave_enable = 0;
7118
7119 if (!interleave_enable)
7120 pr_info("%s: Dual Nand Ctrl in ping-pong mode\n", __func__);
7121 else
7122 pr_info("%s: Dual Nand Ctrl in interleave mode\n", __func__);
7123
7124no_dual_nand_ctlr_support:
7125 res = platform_get_resource_byname(pdev,
7126 IORESOURCE_DMA, "msm_nand_dmac");
7127 if (!res || !res->start) {
7128 pr_err("%s: invalid msm_nand_dmac resource\n", __func__);
7129 return -ENODEV;
7130 }
7131
7132 info = kzalloc(sizeof(struct msm_nand_info), GFP_KERNEL);
7133 if (!info) {
7134 pr_err("%s: No memory for msm_nand_info\n", __func__);
7135 return -ENOMEM;
7136 }
7137
7138 info->msm_nand.dev = &pdev->dev;
7139
7140 init_waitqueue_head(&info->msm_nand.wait_queue);
7141
7142 info->msm_nand.dma_channel = res->start;
7143 pr_info("%s: dmac 0x%x\n", __func__, info->msm_nand.dma_channel);
7144
7145 /* this currently fails if dev is passed in */
7146 info->msm_nand.dma_buffer =
7147 dma_alloc_coherent(/*dev*/ NULL, MSM_NAND_DMA_BUFFER_SIZE,
7148 &info->msm_nand.dma_addr, GFP_KERNEL);
7149 if (info->msm_nand.dma_buffer == NULL) {
7150 pr_err("%s: No memory for msm_nand.dma_buffer\n", __func__);
7151 err = -ENOMEM;
7152 goto out_free_info;
7153 }
7154
7155 pr_info("%s: allocated dma buffer at %p, dma_addr %x\n",
7156 __func__, info->msm_nand.dma_buffer, info->msm_nand.dma_addr);
7157
Sujit Reddy Thummaec9b3252012-04-23 15:53:45 +05307158 /* Let default be VERSION_1 for backward compatibility */
7159 info->msm_nand.uncorrectable_bit_mask = BIT(3);
7160 info->msm_nand.num_err_mask = 0x7;
7161
7162 if (plat_data && (plat_data->version == VERSION_2)) {
7163 info->msm_nand.uncorrectable_bit_mask = BIT(8);
7164 info->msm_nand.num_err_mask = 0x1F;
7165 }
7166
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007167 info->mtd.name = dev_name(&pdev->dev);
7168 info->mtd.priv = &info->msm_nand;
7169 info->mtd.owner = THIS_MODULE;
7170
7171 /* config ebi2_cfg register only for ping pong mode!!! */
7172 if (!interleave_enable && dual_nand_ctlr_present)
7173 flash_wr_reg(&info->msm_nand, EBI2_CFG_REG, 0x4010080);
7174
7175 if (dual_nand_ctlr_present)
7176 msm_nand_nc10_xfr_settings(&info->mtd);
7177
7178 if (msm_nand_scan(&info->mtd, 1))
7179 if (msm_onenand_scan(&info->mtd, 1)) {
7180 pr_err("%s: No nand device found\n", __func__);
7181 err = -ENXIO;
7182 goto out_free_dma_buffer;
7183 }
7184
Sujith Reddy Thumma5e4d9872011-08-24 09:07:55 +05307185 err = setup_mtd_device(pdev, info);
7186 if (err < 0) {
7187 pr_err("%s: setup_mtd_device failed with err=%d\n",
7188 __func__, err);
7189 goto out_free_dma_buffer;
7190 }
7191
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007192 dev_set_drvdata(&pdev->dev, info);
7193
7194 return 0;
7195
7196out_free_dma_buffer:
7197 dma_free_coherent(NULL, MSM_NAND_DMA_BUFFER_SIZE,
7198 info->msm_nand.dma_buffer,
7199 info->msm_nand.dma_addr);
7200out_free_info:
7201 kfree(info);
7202
7203 return err;
7204}
7205
7206static int __devexit msm_nand_remove(struct platform_device *pdev)
7207{
7208 struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);
7209
7210 dev_set_drvdata(&pdev->dev, NULL);
7211
7212 if (info) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007213 msm_nand_release(&info->mtd);
7214 dma_free_coherent(NULL, MSM_NAND_DMA_BUFFER_SIZE,
7215 info->msm_nand.dma_buffer,
7216 info->msm_nand.dma_addr);
7217 kfree(info);
7218 }
7219
7220 return 0;
7221}
7222
7223#define DRIVER_NAME "msm_nand"
7224
7225static struct platform_driver msm_nand_driver = {
7226 .probe = msm_nand_probe,
7227 .remove = __devexit_p(msm_nand_remove),
7228 .driver = {
7229 .name = DRIVER_NAME,
7230 }
7231};
7232
7233MODULE_ALIAS(DRIVER_NAME);
7234
7235static int __init msm_nand_init(void)
7236{
7237 return platform_driver_register(&msm_nand_driver);
7238}
7239
7240static void __exit msm_nand_exit(void)
7241{
7242 platform_driver_unregister(&msm_nand_driver);
7243}
7244
7245module_init(msm_nand_init);
7246module_exit(msm_nand_exit);
7247
7248MODULE_LICENSE("GPL");
7249MODULE_DESCRIPTION("msm_nand flash driver code");