/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h>

#include "denali.h"

MODULE_LICENSE("GPL");

/* We define a module parameter that allows the user to override
 * the hardware-detected setting and choose which ONFI timing mode
 * should be used.
 */
#define NAND_DEFAULT_TIMINGS	-1

static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
module_param(onfi_timing_mode, int, S_IRUGO);
MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting."
			" -1 indicates use default timings");
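
/* Example (illustrative only; the module file name depends on how this
 * driver is built into your kernel):
 *	modprobe denali-nand onfi_timing_mode=4
 * forces ONFI timing mode 4 instead of the mode detected from the device. */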

#define DENALI_NAND_NAME    "denali-nand"

/* We define a macro here that combines all interrupts this driver uses into
 * a single constant value, for convenience. */
#define DENALI_IRQ_ALL	(INTR_STATUS0__DMA_CMD_COMP | \
			INTR_STATUS0__ECC_TRANSACTION_DONE | \
			INTR_STATUS0__ECC_ERR | \
			INTR_STATUS0__PROGRAM_FAIL | \
			INTR_STATUS0__LOAD_COMP | \
			INTR_STATUS0__PROGRAM_COMP | \
			INTR_STATUS0__TIME_OUT | \
			INTR_STATUS0__ERASE_FAIL | \
			INTR_STATUS0__RST_COMP | \
			INTR_STATUS0__ERASE_COMP)

/* indicates whether the internal value for the flash bank is valid */
#define CHIP_SELECT_INVALID	-1

#define SUPPORT_8BITECC		1

/* This macro divides two integers and rounds fractional values up
 * to the nearest integer value. */
#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
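/* For example, CEIL_DIV(100, 8) evaluates to 13, where plain integer
 * division would truncate to 12; exact multiples are left unchanged,
 * so CEIL_DIV(96, 8) == 12. */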

/* this macro allows us to convert from an MTD structure to our own
 * device context (denali) structure.
 */
#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)

/* These constants are defined by the driver to enable common driver
 * configuration options. */
#define SPARE_ACCESS		0x41
#define MAIN_ACCESS		0x42
#define MAIN_SPARE_ACCESS	0x43

#define DENALI_READ	0
#define DENALI_WRITE	0x100

/* types of device accesses. We can issue commands and get status */
#define COMMAND_CYCLE	0
#define ADDR_CYCLE	1
#define STATUS_CYCLE	2

/* this is a helper macro that allows us to
 * format the bank into the proper bits for the controller */
#define BANK(x) ((x) << 24)
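/* For example, BANK(2) == 0x02000000: the chip-select number lands in
 * bits 25:24 of the command/address words built throughout this driver. */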

/* List of platforms this NAND controller has been integrated into */
static const struct pci_device_id denali_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
	{ PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
	{ /* end: all zeroes */ }
};


/* these are static lookup tables that give us easy access to
 * registers in the NAND controller.
 */
static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
						  INTR_STATUS1,
						  INTR_STATUS2,
						  INTR_STATUS3};

static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
					       DEVICE_RESET__BANK1,
					       DEVICE_RESET__BANK2,
					       DEVICE_RESET__BANK3};

static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
					      INTR_STATUS1__TIME_OUT,
					      INTR_STATUS2__TIME_OUT,
					      INTR_STATUS3__TIME_OUT};

static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
					   INTR_STATUS1__RST_COMP,
					   INTR_STATUS2__RST_COMP,
					   INTR_STATUS3__RST_COMP};

/* specifies the debug level of the driver */
static int nand_debug_level;

/* forward declarations */
static void clear_interrupts(struct denali_nand_info *denali);
static uint32_t wait_for_irq(struct denali_nand_info *denali,
							uint32_t irq_mask);
static void denali_irq_enable(struct denali_nand_info *denali,
							uint32_t int_mask);
static uint32_t read_interrupt_status(struct denali_nand_info *denali);

#define DEBUG_DENALI 0

/* This is a wrapper for writing to the denali registers.
 * It allows us to create debug information so we can
 * observe how the driver is programming the device.
 * It uses the standard Linux convention of (value, addr). */
static void denali_write32(uint32_t value, void *addr)
{
	iowrite32(value, addr);

#if DEBUG_DENALI
	printk(KERN_INFO "wrote: 0x%x -> 0x%x\n", value,
			(uint32_t)((uint32_t)addr & 0x1fff));
#endif
}

/* Certain operations for the denali NAND controller use
 * an indexed mode to read/write data. The operation is
 * performed by writing the address value of the command
 * to the device memory, followed by the data. This function
 * abstracts this common operation.
 */
static void index_addr(struct denali_nand_info *denali,
				uint32_t address, uint32_t data)
{
	denali_write32(address, denali->flash_mem);
	denali_write32(data, denali->flash_mem + 0x10);
}

/* Perform an indexed read of the device */
static void index_addr_read_data(struct denali_nand_info *denali,
				 uint32_t address, uint32_t *pdata)
{
	denali_write32(address, denali->flash_mem);
	*pdata = ioread32(denali->flash_mem + 0x10);
}
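
/* Illustrative use of the indexed interface (this mirrors what read_status()
 * and the *_nand_para() helpers below do): issuing a Read ID (0x90) command
 * to the currently selected bank and fetching one ID byte looks roughly like
 *
 *	index_addr(denali, MODE_11 | BANK(denali->flash_bank) | 0, 0x90);
 *	index_addr(denali, MODE_11 | BANK(denali->flash_bank) | 1, 0);
 *	index_addr_read_data(denali, MODE_11 | BANK(denali->flash_bank) | 2, &id);
 *
 * where the low bits pick command (0), address (1) or data/status (2) cycles.
 */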

/* We need to buffer some data for some of the NAND core routines.
 * These routines manage the buffering of that data. */
static void reset_buf(struct denali_nand_info *denali)
{
	denali->buf.head = denali->buf.tail = 0;
}

static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
{
	BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
	denali->buf.buf[denali->buf.tail++] = byte;
}

/* reads the status of the device */
static void read_status(struct denali_nand_info *denali)
{
	uint32_t cmd = 0x0;

	/* initialize the data buffer to store status */
	reset_buf(denali);

	/* initiate a device status read */
	cmd = MODE_11 | BANK(denali->flash_bank);
	index_addr(denali, cmd | COMMAND_CYCLE, 0x70);
	denali_write32(cmd | STATUS_CYCLE, denali->flash_mem);

	/* update buffer with status value */
	write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10));

#if DEBUG_DENALI
	printk(KERN_INFO "device reporting status value of 0x%2x\n",
			denali->buf.buf[0]);
#endif
}

/* resets a specific device connected to the core */
static void reset_bank(struct denali_nand_info *denali)
{
	uint32_t irq_status = 0;
	uint32_t irq_mask = reset_complete[denali->flash_bank] |
			    operation_timeout[denali->flash_bank];
	int bank = 0;

	clear_interrupts(denali);

	bank = device_reset_banks[denali->flash_bank];
	denali_write32(bank, denali->flash_reg + DEVICE_RESET);

	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status & operation_timeout[denali->flash_bank])
		printk(KERN_ERR "reset bank failed.\n");
}

/* Reset the flash controller */
static uint16_t denali_nand_reset(struct denali_nand_info *denali)
{
	uint32_t i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
		denali_write32(reset_complete[i] | operation_timeout[i],
				denali->flash_reg + intr_status_addresses[i]);

	for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
		denali_write32(device_reset_banks[i],
				denali->flash_reg + DEVICE_RESET);
		while (!(ioread32(denali->flash_reg +
				intr_status_addresses[i]) &
			(reset_complete[i] | operation_timeout[i])))
			;
		if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
			operation_timeout[i])
			nand_dbg_print(NAND_DBG_WARN,
			"NAND Reset operation timed out on bank %d\n", i);
	}

	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
		denali_write32(reset_complete[i] | operation_timeout[i],
				denali->flash_reg + intr_status_addresses[i]);

	return PASS;
}

/* this routine calculates the ONFI timing values for a given mode and
 * programs the clocking register accordingly. The mode is determined by
 * the get_onfi_nand_para routine.
 */
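/*
 * Worked example (illustrative only; CLK_X and CLK_MULTI come from denali.h
 * and may differ): assuming a CLK_X of 5, ONFI mode 5 gives Trp[5] = 10 and
 * Treh[5] = 7, so en_lo = CEIL_DIV(10, 5) = 2 and en_hi = CEIL_DIV(7, 5) = 2.
 * (en_lo + en_hi) * 5 = 20 already meets Trc[5] = 20, and the initial
 * acc_clks is CEIL_DIV(Trea[5], 5) = CEIL_DIV(16, 5) = 4.
 */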
static void nand_onfi_timing_set(struct denali_nand_info *denali,
								uint16_t mode)
{
	uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
	uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
	uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
	uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
	uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
	uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
	uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
	uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
	uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
	uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
	uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
	uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};

	uint16_t TclsRising = 1;
	uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
	uint16_t dv_window = 0;
	uint16_t en_lo, en_hi;
	uint16_t acc_clks;
	uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	en_lo = CEIL_DIV(Trp[mode], CLK_X);
	en_hi = CEIL_DIV(Treh[mode], CLK_X);
#if ONFI_BLOOM_TIME
	if ((en_hi * CLK_X) < (Treh[mode] + 2))
		en_hi++;
#endif

	if ((en_lo + en_hi) * CLK_X < Trc[mode])
		en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);

	if ((en_lo + en_hi) < CLK_MULTI)
		en_lo += CLK_MULTI - en_lo - en_hi;

	while (dv_window < 8) {
		data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];

		data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];

		data_invalid =
		    data_invalid_rhoh <
		    data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;

		dv_window = data_invalid - Trea[mode];

		if (dv_window < 8)
			en_lo++;
	}

	acc_clks = CEIL_DIV(Trea[mode], CLK_X);

	while (((acc_clks * CLK_X) - Trea[mode]) < 3)
		acc_clks++;

	if ((data_invalid - acc_clks * CLK_X) < 2)
		nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
			__FILE__, __LINE__);

	addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
	re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
	re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
	we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
	cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
	if (!TclsRising)
		cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
	if (cs_cnt == 0)
		cs_cnt = 1;

	if (Tcea[mode]) {
		while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
			cs_cnt++;
	}

#if MODE5_WORKAROUND
	if (mode == 5)
		acc_clks = 5;
#endif

	/* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
	if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) &&
		(ioread32(denali->flash_reg + DEVICE_ID) == 0x88))
		acc_clks = 6;

	denali_write32(acc_clks, denali->flash_reg + ACC_CLKS);
	denali_write32(re_2_we, denali->flash_reg + RE_2_WE);
	denali_write32(re_2_re, denali->flash_reg + RE_2_RE);
	denali_write32(we_2_re, denali->flash_reg + WE_2_RE);
	denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
	denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
	denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
	denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
}

/* configures the initial ECC settings for the controller */
static void set_ecc_config(struct denali_nand_info *denali)
{
#if SUPPORT_8BITECC
	if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
		(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) <= 128))
		denali_write32(8, denali->flash_reg + ECC_CORRECTION);
#endif

	if ((ioread32(denali->flash_reg + ECC_CORRECTION) &
		ECC_CORRECTION__VALUE) == 1) {
		denali->dev_info.wECCBytesPerSector = 4;
		denali->dev_info.wECCBytesPerSector *=
			denali->dev_info.wDevicesConnected;
		denali->dev_info.wNumPageSpareFlag =
			denali->dev_info.wPageSpareSize -
			denali->dev_info.wPageDataSize /
			(ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
			denali->dev_info.wECCBytesPerSector
			- denali->dev_info.wSpareSkipBytes;
	} else {
		denali->dev_info.wECCBytesPerSector =
			(ioread32(denali->flash_reg + ECC_CORRECTION) &
			ECC_CORRECTION__VALUE) * 13 / 8;
		if ((denali->dev_info.wECCBytesPerSector) % 2 == 0)
			denali->dev_info.wECCBytesPerSector += 2;
		else
			denali->dev_info.wECCBytesPerSector += 1;

		denali->dev_info.wECCBytesPerSector *=
			denali->dev_info.wDevicesConnected;
		denali->dev_info.wNumPageSpareFlag =
			denali->dev_info.wPageSpareSize -
			denali->dev_info.wPageDataSize /
			(ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
			denali->dev_info.wECCBytesPerSector
			- denali->dev_info.wSpareSkipBytes;
	}
}
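
/* Quick sanity check of the math above (illustrative): with the ECC engine
 * set to 8-bit correction and a single device connected, the else branch
 * computes 8 * 13 / 8 = 13 ECC bytes per 512-byte sector, which is odd and
 * so is rounded up to 14; the spare-flag count is whatever remains of the
 * spare area after those ECC bytes and the skipped spare bytes. */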

/* queries the NAND device to see what ONFI modes it supports. */
static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
{
	int i;
	uint16_t blks_lun_l, blks_lun_h, n_of_luns;
	uint32_t blockperlun, id;

	denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET);

	while (!((ioread32(denali->flash_reg + INTR_STATUS0) &
		INTR_STATUS0__RST_COMP) |
		(ioread32(denali->flash_reg + INTR_STATUS0) &
		INTR_STATUS0__TIME_OUT)))
		;

	if (ioread32(denali->flash_reg + INTR_STATUS0) &
		INTR_STATUS0__RST_COMP) {
		denali_write32(DEVICE_RESET__BANK1,
				denali->flash_reg + DEVICE_RESET);
		while (!((ioread32(denali->flash_reg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) |
			(ioread32(denali->flash_reg + INTR_STATUS1) &
			INTR_STATUS1__TIME_OUT)))
			;

		if (ioread32(denali->flash_reg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) {
			denali_write32(DEVICE_RESET__BANK2,
					denali->flash_reg + DEVICE_RESET);
			while (!((ioread32(denali->flash_reg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) |
				(ioread32(denali->flash_reg + INTR_STATUS2) &
				INTR_STATUS2__TIME_OUT)))
				;

			if (ioread32(denali->flash_reg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) {
				denali_write32(DEVICE_RESET__BANK3,
					denali->flash_reg + DEVICE_RESET);
				while (!((ioread32(denali->flash_reg +
						INTR_STATUS3) &
					INTR_STATUS3__RST_COMP) |
					(ioread32(denali->flash_reg +
						INTR_STATUS3) &
					INTR_STATUS3__TIME_OUT)))
					;
			} else {
				printk(KERN_ERR "Getting a time out for bank 2!\n");
			}
		} else {
			printk(KERN_ERR "Getting a time out for bank 1!\n");
		}
	}

	denali_write32(INTR_STATUS0__TIME_OUT,
			denali->flash_reg + INTR_STATUS0);
	denali_write32(INTR_STATUS1__TIME_OUT,
			denali->flash_reg + INTR_STATUS1);
	denali_write32(INTR_STATUS2__TIME_OUT,
			denali->flash_reg + INTR_STATUS2);
	denali_write32(INTR_STATUS3__TIME_OUT,
			denali->flash_reg + INTR_STATUS3);

	denali->dev_info.wONFIDevFeatures =
		ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES);
	denali->dev_info.wONFIOptCommands =
		ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS);
	denali->dev_info.wONFITimingMode =
		ioread32(denali->flash_reg + ONFI_TIMING_MODE);
	denali->dev_info.wONFIPgmCacheTimingMode =
		ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE);

	n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
	blks_lun_l = ioread32(denali->flash_reg +
			ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
	blks_lun_h = ioread32(denali->flash_reg +
			ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);

	blockperlun = (blks_lun_h << 16) | blks_lun_l;

	denali->dev_info.wTotalBlocks = n_of_luns * blockperlun;

	if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
		ONFI_TIMING_MODE__VALUE))
		return FAIL;

	for (i = 5; i > 0; i--) {
		if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
			(0x01 << i))
			break;
	}

	nand_onfi_timing_set(denali, i);

	index_addr(denali, MODE_11 | 0, 0x90);
	index_addr(denali, MODE_11 | 1, 0);

	for (i = 0; i < 3; i++)
		index_addr_read_data(denali, MODE_11 | 2, &id);

	nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);

	denali->dev_info.MLCDevice = id & 0x0C;

	/* By now, all the ONFI devices we know support the page cache */
	/* rw feature. So here we enable the pipeline_rw_ahead feature */
	/* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
	/* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */

	return PASS;
}

static void get_samsung_nand_para(struct denali_nand_info *denali)
{
	uint8_t no_of_planes;
	uint32_t blk_size;
	uint64_t plane_size, capacity;
	uint32_t id_bytes[5];
	int i;

	index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90);
	index_addr(denali, (uint32_t)(MODE_11 | 1), 0);
	for (i = 0; i < 5; i++)
		index_addr_read_data(denali, (uint32_t)(MODE_11 | 2),
					&id_bytes[i]);

	nand_dbg_print(NAND_DBG_DEBUG,
			"ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
			id_bytes[0], id_bytes[1], id_bytes[2],
			id_bytes[3], id_bytes[4]);

	if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
		/* Set timing register values according to datasheet */
		denali_write32(5, denali->flash_reg + ACC_CLKS);
		denali_write32(20, denali->flash_reg + RE_2_WE);
		denali_write32(12, denali->flash_reg + WE_2_RE);
		denali_write32(14, denali->flash_reg + ADDR_2_DATA);
		denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT);
		denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT);
		denali_write32(2, denali->flash_reg + CS_SETUP_CNT);
	}

	no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
	plane_size = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4);
	blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) &
				0x30) >> 4);
	capacity = (uint64_t)128 * plane_size * no_of_planes;

	do_div(capacity, blk_size);
	denali->dev_info.wTotalBlocks = capacity;
}

static void get_toshiba_nand_para(struct denali_nand_info *denali)
{
	uint32_t tmp;

	/* Workaround to fix a controller bug which reports a wrong
	 * spare area size for certain Toshiba NAND devices */
	if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
		(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
		denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
			ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		denali_write32(tmp,
				denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
#if SUPPORT_15BITECC
		denali_write32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		denali_write32(8, denali->flash_reg + ECC_CORRECTION);
#endif
	}
}

static void get_hynix_nand_para(struct denali_nand_info *denali,
							uint8_t device_id)
{
	uint32_t main_size, spare_size;

	switch (device_id) {
	case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
	case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
		denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK);
		denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		main_size = 4096 *
			ioread32(denali->flash_reg + DEVICES_CONNECTED);
		spare_size = 224 *
			ioread32(denali->flash_reg + DEVICES_CONNECTED);
		denali_write32(main_size,
				denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		denali_write32(spare_size,
				denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
		denali_write32(0, denali->flash_reg + DEVICE_WIDTH);
#if SUPPORT_15BITECC
		denali_write32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		denali_write32(8, denali->flash_reg + ECC_CORRECTION);
#endif
		denali->dev_info.MLCDevice = 1;
		break;
	default:
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: Unknown Hynix NAND (Device ID: 0x%x). "
			"Will use default parameter values instead.\n",
			device_id);
	}
}

/* determines how many NAND chips are connected to the controller. Note that
 * for Intel CE4100 devices we don't support more than one device.
 */
static void find_valid_banks(struct denali_nand_info *denali)
{
	uint32_t id[LLD_MAX_FLASH_BANKS];
	int i;

	denali->total_used_banks = 1;
	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
		index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
		index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
		index_addr_read_data(denali,
				(uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);

		nand_dbg_print(NAND_DBG_DEBUG,
			"Return 1st ID for bank[%d]: %x\n", i, id[i]);

		if (i == 0) {
			if (!(id[i] & 0x0ff))
				break; /* no device found on bank 0 */
		} else {
			if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
				denali->total_used_banks++;
			else
				break;
		}
	}

	if (denali->platform == INTEL_CE4100) {
		/* Platform limitations of the CE4100 device limit
		 * users to a single chip solution for NAND.
		 * Multichip support is not enabled.
		 */
		if (denali->total_used_banks != 1) {
			printk(KERN_ERR "Sorry, Intel CE4100 only supports "
					"a single NAND device.\n");
			BUG();
		}
	}
	nand_dbg_print(NAND_DBG_DEBUG,
		"denali->total_used_banks: %d\n", denali->total_used_banks);
}

static void detect_partition_feature(struct denali_nand_info *denali)
{
	if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
		if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
			PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
			denali->dev_info.wSpectraStartBlock =
			    ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
			      MIN_MAX_BANK_1__MIN_VALUE) *
			     denali->dev_info.wTotalBlocks)
			    +
			    (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
			    MIN_BLK_ADDR_1__VALUE);

			denali->dev_info.wSpectraEndBlock =
			    (((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
			       MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
			     denali->dev_info.wTotalBlocks)
			    +
			    (ioread32(denali->flash_reg + MAX_BLK_ADDR_1) &
			    MAX_BLK_ADDR_1__VALUE);

			denali->dev_info.wTotalBlocks *=
				denali->total_used_banks;

			if (denali->dev_info.wSpectraEndBlock >=
			    denali->dev_info.wTotalBlocks) {
				denali->dev_info.wSpectraEndBlock =
				    denali->dev_info.wTotalBlocks - 1;
			}

			denali->dev_info.wDataBlockNum =
				denali->dev_info.wSpectraEndBlock -
				denali->dev_info.wSpectraStartBlock + 1;
		} else {
			denali->dev_info.wTotalBlocks *=
				denali->total_used_banks;
			denali->dev_info.wSpectraStartBlock =
				SPECTRA_START_BLOCK;
			denali->dev_info.wSpectraEndBlock =
				denali->dev_info.wTotalBlocks - 1;
			denali->dev_info.wDataBlockNum =
				denali->dev_info.wSpectraEndBlock -
				denali->dev_info.wSpectraStartBlock + 1;
		}
	} else {
		denali->dev_info.wTotalBlocks *= denali->total_used_banks;
		denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
		denali->dev_info.wSpectraEndBlock =
			denali->dev_info.wTotalBlocks - 1;
		denali->dev_info.wDataBlockNum =
			denali->dev_info.wSpectraEndBlock -
			denali->dev_info.wSpectraStartBlock + 1;
	}
}

static void dump_device_info(struct denali_nand_info *denali)
{
	nand_dbg_print(NAND_DBG_DEBUG, "denali->dev_info:\n");
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
		denali->dev_info.wDeviceMaker);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
		denali->dev_info.wDeviceID);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
		denali->dev_info.wDeviceType);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
		denali->dev_info.wSpectraStartBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
		denali->dev_info.wSpectraEndBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
		denali->dev_info.wTotalBlocks);
	nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
		denali->dev_info.wPagesPerBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
		denali->dev_info.wPageSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
		denali->dev_info.wPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
		denali->dev_info.wPageSpareSize);
	nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
		denali->dev_info.wNumPageSpareFlag);
	nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
		denali->dev_info.wECCBytesPerSector);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
		denali->dev_info.wBlockSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
		denali->dev_info.wBlockDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
		denali->dev_info.wDataBlockNum);
	nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
		denali->dev_info.bPlaneNum);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
		denali->dev_info.wDeviceMainAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
		denali->dev_info.wDeviceSpareAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
		denali->dev_info.wDevicesConnected);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
		denali->dev_info.wDeviceWidth);
	nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
		denali->dev_info.wHWRevision);
	nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
		denali->dev_info.wHWFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
		denali->dev_info.wONFIDevFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
		denali->dev_info.wONFIOptCommands);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
		denali->dev_info.wONFITimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
		denali->dev_info.wONFIPgmCacheTimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
		denali->dev_info.MLCDevice ? "Yes" : "No");
	nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
		denali->dev_info.wSpareSkipBytes);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
		denali->dev_info.nBitsInPageNumber);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
		denali->dev_info.nBitsInPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
		denali->dev_info.nBitsInBlockDataSize);
}

static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
{
	uint16_t status = PASS;
	uint8_t no_of_planes;
	uint32_t id_bytes[5], addr;
	uint8_t i, maf_id, device_id;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	/* Use the read-id method to get the device ID and other
	 * parameters. For some NAND chips, the controller can't
	 * report the correct device ID by reading from the
	 * DEVICE_ID register.
	 */
	addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
	index_addr(denali, (uint32_t)addr | 0, 0x90);
	index_addr(denali, (uint32_t)addr | 1, 0);
	for (i = 0; i < 5; i++)
		index_addr_read_data(denali, addr | 2, &id_bytes[i]);
	maf_id = id_bytes[0];
	device_id = id_bytes[1];

	if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (FAIL == get_onfi_nand_para(denali))
			return FAIL;
	} else if (maf_id == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para(denali);
	} else if (maf_id == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para(denali);
	} else if (maf_id == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para(denali, device_id);
	} else {
		denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values: "
			"acc_clks: %d, re_2_we: %d, we_2_re: %d, "
			"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
			ioread32(denali->flash_reg + ACC_CLKS),
			ioread32(denali->flash_reg + RE_2_WE),
			ioread32(denali->flash_reg + WE_2_RE),
			ioread32(denali->flash_reg + ADDR_2_DATA),
			ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
			ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
			ioread32(denali->flash_reg + CS_SETUP_CNT));

	denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION);
	denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES);

	denali->dev_info.wDeviceMainAreaSize =
		ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
	denali->dev_info.wDeviceSpareAreaSize =
		ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);

	denali->dev_info.wPageDataSize =
		ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);

	/* Note: When using the Micron 4K NAND device, the controller reports
	 * the page spare size as 216 bytes, but Micron's spec says it is
	 * 218 bytes. If we force it to 218 bytes, the controller cannot work
	 * correctly, so just let it be. But keep in mind that this bug may
	 * cause other problems in the future. - Yunpeng 2008-10-10
	 */
	denali->dev_info.wPageSpareSize =
		ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);

	denali->dev_info.wPagesPerBlock =
		ioread32(denali->flash_reg + PAGES_PER_BLOCK);

	denali->dev_info.wPageSize =
	    denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize;
	denali->dev_info.wBlockSize =
	    denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock;
	denali->dev_info.wBlockDataSize =
	    denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize;

	denali->dev_info.wDeviceWidth =
		ioread32(denali->flash_reg + DEVICE_WIDTH);
	denali->dev_info.wDeviceType =
		((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8);

	denali->dev_info.wDevicesConnected =
		ioread32(denali->flash_reg + DEVICES_CONNECTED);

	denali->dev_info.wSpareSkipBytes =
		ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) *
		denali->dev_info.wDevicesConnected;

	denali->dev_info.nBitsInPageNumber =
		ilog2(denali->dev_info.wPagesPerBlock);
	denali->dev_info.nBitsInPageDataSize =
		ilog2(denali->dev_info.wPageDataSize);
	denali->dev_info.nBitsInBlockDataSize =
		ilog2(denali->dev_info.wBlockDataSize);

	set_ecc_config(denali);

	no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) &
		NUMBER_OF_PLANES__VALUE;

	switch (no_of_planes) {
	case 0:
	case 1:
	case 3:
	case 7:
		denali->dev_info.bPlaneNum = no_of_planes + 1;
		break;
	default:
		status = FAIL;
		break;
	}

	find_valid_banks(denali);

	detect_partition_feature(denali);

	dump_device_info(denali);

	/* If the user specified to override the default timings
	 * with a specific ONFI mode, we apply those changes here.
	 */
	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
		nand_onfi_timing_set(denali, onfi_timing_mode);

	return status;
}

static void denali_set_intr_modes(struct denali_nand_info *denali,
					uint16_t INT_ENABLE)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (INT_ENABLE)
		denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
	else
		denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
}

/* validation function to verify that the controlling software is making
 * a valid request
 */
static inline bool is_flash_bank_valid(int flash_bank)
{
	return (flash_bank >= 0 && flash_bank < 4);
}

static void denali_irq_init(struct denali_nand_info *denali)
{
	uint32_t int_mask = 0;

	/* Disable global interrupts */
	denali_set_intr_modes(denali, false);

	int_mask = DENALI_IRQ_ALL;

	/* Clear all status bits */
	denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0);
	denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1);
	denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2);
	denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3);

	denali_irq_enable(denali, int_mask);
}

static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
{
	denali_set_intr_modes(denali, false);
	free_irq(irqnum, denali);
}

static void denali_irq_enable(struct denali_nand_info *denali,
				uint32_t int_mask)
{
	denali_write32(int_mask, denali->flash_reg + INTR_EN0);
	denali_write32(int_mask, denali->flash_reg + INTR_EN1);
	denali_write32(int_mask, denali->flash_reg + INTR_EN2);
	denali_write32(int_mask, denali->flash_reg + INTR_EN3);
}

/* This function returns the interrupt status bits that this driver cares
 * about; masking out everything else reduces the overhead of servicing a
 * shared interrupt.
 */
static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
{
	return read_interrupt_status(denali) & DENALI_IRQ_ALL;
}

/* Interrupts are cleared by writing a 1 to the appropriate status bit */
static inline void clear_interrupt(struct denali_nand_info *denali,
							uint32_t irq_mask)
{
	uint32_t intr_status_reg = 0;

	intr_status_reg = intr_status_addresses[denali->flash_bank];

	denali_write32(irq_mask, denali->flash_reg + intr_status_reg);
}

static void clear_interrupts(struct denali_nand_info *denali)
{
	uint32_t status = 0x0;
	spin_lock_irq(&denali->irq_lock);

	status = read_interrupt_status(denali);

#if DEBUG_DENALI
	denali->irq_debug_array[denali->idx++] = 0x30000000 | status;
	denali->idx %= 32;
#endif

	denali->irq_status = 0x0;
	spin_unlock_irq(&denali->irq_lock);
}

static uint32_t read_interrupt_status(struct denali_nand_info *denali)
{
	uint32_t intr_status_reg = 0;

	intr_status_reg = intr_status_addresses[denali->flash_bank];

	return ioread32(denali->flash_reg + intr_status_reg);
}

#if DEBUG_DENALI
static void print_irq_log(struct denali_nand_info *denali)
{
	int i = 0;

	printk(KERN_INFO "ISR debug log index = %X\n", denali->idx);
	for (i = 0; i < 32; i++)
		printk(KERN_INFO "%08X: %08X\n", i, denali->irq_debug_array[i]);
}
#endif

/* This is the interrupt service routine. It handles all interrupts
 * sent to this device. Note that on CE4100, this is a shared
 * interrupt.
 */
static irqreturn_t denali_isr(int irq, void *dev_id)
{
	struct denali_nand_info *denali = dev_id;
	uint32_t irq_status = 0x0;
	irqreturn_t result = IRQ_NONE;

	spin_lock(&denali->irq_lock);

	/* check to see if a valid NAND chip has
	 * been selected.
	 */
	if (is_flash_bank_valid(denali->flash_bank)) {
		/* check to see if controller generated
		 * the interrupt, since this is a shared interrupt */
		irq_status = denali_irq_detected(denali);
		if (irq_status != 0) {
#if DEBUG_DENALI
			denali->irq_debug_array[denali->idx++] =
				0x10000000 | irq_status;
			denali->idx %= 32;

			printk(KERN_INFO "IRQ status = 0x%04x\n", irq_status);
#endif
			/* handle interrupt */
			/* first acknowledge it */
			clear_interrupt(denali, irq_status);
			/* store the status in the device context for someone
			   to read */
			denali->irq_status |= irq_status;
			/* notify anyone who cares that it happened */
			complete(&denali->complete);
			/* tell the OS that we've handled this */
			result = IRQ_HANDLED;
		}
	}
	spin_unlock(&denali->irq_lock);
	return result;
}
#define BANK(x) ((x) << 24)

static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
{
	unsigned long comp_res = 0;
	uint32_t intr_status = 0;
	bool retry = false;
	unsigned long timeout = msecs_to_jiffies(1000);

	do {
#if DEBUG_DENALI
		printk(KERN_INFO "waiting for 0x%x\n", irq_mask);
#endif
		comp_res =
			wait_for_completion_timeout(&denali->complete, timeout);
		spin_lock_irq(&denali->irq_lock);
		intr_status = denali->irq_status;

#if DEBUG_DENALI
		denali->irq_debug_array[denali->idx++] =
			0x20000000 | (irq_mask << 16) | intr_status;
		denali->idx %= 32;
#endif

		if (intr_status & irq_mask) {
			denali->irq_status &= ~irq_mask;
			spin_unlock_irq(&denali->irq_lock);
#if DEBUG_DENALI
			if (retry)
				printk(KERN_INFO "status on retry = 0x%x\n",
						intr_status);
#endif
			/* our interrupt was detected */
			break;
		} else {
			/* these are not the interrupts you are looking for -
			 * need to wait again */
			spin_unlock_irq(&denali->irq_lock);
#if DEBUG_DENALI
			print_irq_log(denali);
			printk(KERN_INFO "received irq nobody cared:"
					" irq_status = 0x%x, irq_mask = 0x%x,"
					" timeout = %ld\n", intr_status,
					irq_mask, comp_res);
#endif
			retry = true;
		}
	} while (comp_res != 0);

	if (comp_res == 0) {
		/* timeout */
		printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
				intr_status, irq_mask);

		intr_status = 0;
	}
	return intr_status;
}
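
/* Typical usage in this driver (see reset_bank() above): call
 * clear_interrupts(), start the operation, then wait_for_irq() with the mask
 * of status bits that signal completion. A return value of 0 means the
 * one-second timeout expired without any of the requested bits arriving. */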

/* This helper function sets up the registers for ECC and whether or not
 * the spare area will be transferred. */
static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
				bool transfer_spare)
{
	int ecc_en_flag = 0, transfer_spare_flag = 0;

	/* set ECC, transfer spare bits if needed */
	ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
	transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;

	/* Enable spare area/ECC per user's request. */
	denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
	denali_write32(transfer_spare_flag,
			denali->flash_reg + TRANSFER_SPARE_REG);
}

/* sends a pipeline command operation to the controller. See the Denali NAND
 * controller's user guide for more information (section 4.2.3.6).
 */
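/* As used below: MODE_10 transactions issued through index_addr() select the
 * access type (MAIN_ACCESS, SPARE_ACCESS, ...) and kick off pipeline read
 * commands, while MODE_01 writes to flash_mem set up the bank/page address
 * for the actual data transfer. The MODE_* bit encodings themselves are
 * defined in denali.h. */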
static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
							bool ecc_en,
							bool transfer_spare,
							int access_type,
							int op)
{
	int status = PASS;
	uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
		 irq_mask = 0;

	if (op == DENALI_READ)
		irq_mask = INTR_STATUS0__LOAD_COMP;
	else if (op == DENALI_WRITE)
		irq_mask = 0;
	else
		BUG();

	setup_ecc_for_xfer(denali, ecc_en, transfer_spare);

#if DEBUG_DENALI
	spin_lock_irq(&denali->irq_lock);
	denali->irq_debug_array[denali->idx++] =
		0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) |
		(access_type << 4);
	denali->idx %= 32;
	spin_unlock_irq(&denali->irq_lock);
#endif


	/* clear interrupts */
	clear_interrupts(denali);

	addr = BANK(denali->flash_bank) | denali->page;

	if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
		cmd = MODE_01 | addr;
		denali_write32(cmd, denali->flash_mem);
	} else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
		/* select the spare area before writing it */
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, access_type);

		cmd = MODE_01 | addr;
		denali_write32(cmd, denali->flash_mem);
	} else if (op == DENALI_READ) {
		/* setup page read request for access type */
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, access_type);

		/* page 33 of the NAND controller spec indicates we should not
		   use the pipeline commands in Spare area only mode. So we
		   don't.
		 */
		if (access_type == SPARE_ACCESS) {
			cmd = MODE_01 | addr;
			denali_write32(cmd, denali->flash_mem);
		} else {
			index_addr(denali, (uint32_t)cmd,
					0x2000 | op | page_count);

			/* wait for command to be accepted
			 * can always use status0 bit as the
			 * mask is identical for each
			 * bank. */
			irq_status = wait_for_irq(denali, irq_mask);

			if (irq_status == 0) {
				printk(KERN_ERR "cmd, page, addr on timeout "
					"(0x%x, 0x%x, 0x%x)\n", cmd,
					denali->page, addr);
				status = FAIL;
			} else {
				cmd = MODE_01 | addr;
				denali_write32(cmd, denali->flash_mem);
			}
		}
	}
	return status;
}

/* helper function that simply writes a buffer to the flash */
static int write_data_to_flash_mem(struct denali_nand_info *denali,
							const uint8_t *buf,
							int len)
{
	uint32_t i = 0, *buf32;

	/* verify that the len is a multiple of 4. see comment in
	 * read_data_from_flash_mem() */
	BUG_ON((len % 4) != 0);

	/* write the data to the flash memory */
	buf32 = (uint32_t *)buf;
	for (i = 0; i < len / 4; i++)
		denali_write32(*buf32++, denali->flash_mem + 0x10);
	return i*4; /* intent is to return the number of bytes written */
}

/* helper function that simply reads a buffer from the flash */
static int read_data_from_flash_mem(struct denali_nand_info *denali,
							uint8_t *buf,
							int len)
{
	uint32_t i = 0, *buf32;

	/* we assume that len will be a multiple of 4, if not
	 * it would be nice to know about it ASAP rather than
	 * have random failures...
	 * This assumption is based on the fact that this
	 * function is designed to be used to read flash pages,
	 * which are typically multiples of 4...
	 */

	BUG_ON((len % 4) != 0);

	/* transfer the data from the flash */
	buf32 = (uint32_t *)buf;
	for (i = 0; i < len / 4; i++)
		*buf32++ = ioread32(denali->flash_mem + 0x10);
	return i*4; /* intent is to return the number of bytes read */
}

/* writes OOB data to the device */
static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
						INTR_STATUS0__PROGRAM_FAIL;
	int status = 0;

	denali->page = page;

	if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
							DENALI_WRITE) == PASS) {
		write_data_to_flash_mem(denali, buf, mtd->oobsize);

#if DEBUG_DENALI
		spin_lock_irq(&denali->irq_lock);
		denali->irq_debug_array[denali->idx++] =
			0x80000000 | mtd->oobsize;
		denali->idx %= 32;
		spin_unlock_irq(&denali->irq_lock);
#endif


		/* wait for operation to complete */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0) {
			printk(KERN_ERR "OOB write failed\n");
			status = -EIO;
		}
	} else {
		printk(KERN_ERR "unable to send pipeline command\n");
		status = -EIO;
	}
	return status;
}

/* reads OOB data from the device */
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_mask = INTR_STATUS0__LOAD_COMP,
			 irq_status = 0, addr = 0x0, cmd = 0x0;

	denali->page = page;

#if DEBUG_DENALI
	printk(KERN_INFO "read_oob %d\n", page);
#endif
	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
							DENALI_READ) == PASS) {
		read_data_from_flash_mem(denali, buf, mtd->oobsize);

		/* wait for command to be accepted
		 * can always use status0 bit as the mask is identical for each
		 * bank. */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0)
			printk(KERN_ERR "page on OOB timeout %d\n",
					denali->page);

		/* We set the device back to MAIN_ACCESS here as I observed
		 * instability with the controller if you do a block erase
		 * and the last transaction was a SPARE_ACCESS. Block erase
		 * is reliable (according to the MTD test infrastructure)
		 * if you are in MAIN_ACCESS.
		 */
		addr = BANK(denali->flash_bank) | denali->page;
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);

#if DEBUG_DENALI
		spin_lock_irq(&denali->irq_lock);
		denali->irq_debug_array[denali->idx++] =
			0x60000000 | mtd->oobsize;
		denali->idx %= 32;
		spin_unlock_irq(&denali->irq_lock);
#endif
	}
}
1337
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001338/* this function examines buffers to see if they contain data that
Jason Robertsce082592010-05-13 15:57:33 +01001339 * indicate that the buffer is part of an erased region of flash.
1340 */
1341bool is_erased(uint8_t *buf, int len)
1342{
1343 int i = 0;
1344 for (i = 0; i < len; i++)
Jason Robertsce082592010-05-13 15:57:33 +01001345 if (buf[i] != 0xFF)
Jason Robertsce082592010-05-13 15:57:33 +01001346 return false;
Jason Robertsce082592010-05-13 15:57:33 +01001347 return true;
1348}
1349#define ECC_SECTOR_SIZE 512
1350
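/* helpers to pick apart the fields reported in the ECC_ERROR_ADDRESS and
 * ERR_CORRECTION_INFO registers when the controller flags an ECC error */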
1351#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
1352#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
1353#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
1354#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO))
#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
1356#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
1357
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001358static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
Jason Robertsce082592010-05-13 15:57:33 +01001359 uint8_t *oobbuf, uint32_t irq_status)
1360{
1361 bool check_erased_page = false;
1362
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001363 if (irq_status & INTR_STATUS0__ECC_ERR) {
		/* read the ECC errors; correct them where possible */
1365 uint32_t err_address = 0, err_correction_info = 0;
1366 uint32_t err_byte = 0, err_sector = 0, err_device = 0;
1367 uint32_t err_correction_value = 0;
1368
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001369 do {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001370 err_address = ioread32(denali->flash_reg +
Jason Robertsce082592010-05-13 15:57:33 +01001371 ECC_ERROR_ADDRESS);
1372 err_sector = ECC_SECTOR(err_address);
1373 err_byte = ECC_BYTE(err_address);
1374
1375
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001376 err_correction_info = ioread32(denali->flash_reg +
Jason Robertsce082592010-05-13 15:57:33 +01001377 ERR_CORRECTION_INFO);
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001378 err_correction_value =
Jason Robertsce082592010-05-13 15:57:33 +01001379 ECC_CORRECTION_VALUE(err_correction_info);
1380 err_device = ECC_ERR_DEVICE(err_correction_info);
1381
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001382 if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
Jason Robertsce082592010-05-13 15:57:33 +01001383 /* offset in our buffer is computed as:
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001384 sector number * sector size + offset in
Jason Robertsce082592010-05-13 15:57:33 +01001385 sector
1386 */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001387 int offset = err_sector * ECC_SECTOR_SIZE +
Jason Robertsce082592010-05-13 15:57:33 +01001388 err_byte;
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001389 if (offset < denali->mtd.writesize) {
Jason Robertsce082592010-05-13 15:57:33 +01001390 /* correct the ECC error */
1391 buf[offset] ^= err_correction_value;
1392 denali->mtd.ecc_stats.corrected++;
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001393 } else {
Jason Robertsce082592010-05-13 15:57:33 +01001394 /* bummer, couldn't correct the error */
1395 printk(KERN_ERR "ECC offset invalid\n");
1396 denali->mtd.ecc_stats.failed++;
1397 }
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001398 } else {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001399 /* if the error is not correctable, need to
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +08001400 * look at the page to see if it is an erased
1401 * page. if so, then it's not a real ECC error
				 */
Jason Robertsce082592010-05-13 15:57:33 +01001403 check_erased_page = true;
1404 }
1405
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001406#if DEBUG_DENALI
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +08001407 printk(KERN_INFO "Detected ECC error in page %d:"
1408 " err_addr = 0x%08x, info to fix is"
1409 " 0x%08x\n", denali->page, err_address,
1410 err_correction_info);
Jason Robertsce082592010-05-13 15:57:33 +01001411#endif
1412 } while (!ECC_LAST_ERR(err_correction_info));
1413 }
1414 return check_erased_page;
1415}
1416
1417/* programs the controller to either enable/disable DMA transfers */
David Woodhouseaadff492010-05-13 16:12:43 +01001418static void denali_enable_dma(struct denali_nand_info *denali, bool en)
Jason Robertsce082592010-05-13 15:57:33 +01001419{
1420 uint32_t reg_val = 0x0;
1421
Chuanxiao Donga99d1792010-07-27 11:32:21 +08001422 if (en)
1423 reg_val = DMA_ENABLE__FLAG;
Jason Robertsce082592010-05-13 15:57:33 +01001424
1425 denali_write32(reg_val, denali->flash_reg + DMA_ENABLE);
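	/* read back to flush the register write before we return */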
1426 ioread32(denali->flash_reg + DMA_ENABLE);
1427}
1428
/* sets up the HW to perform the data DMA */
David Woodhouseaadff492010-05-13 16:12:43 +01001430static void denali_setup_dma(struct denali_nand_info *denali, int op)
Jason Robertsce082592010-05-13 15:57:33 +01001431{
1432 uint32_t mode = 0x0;
1433 const int page_count = 1;
1434 dma_addr_t addr = denali->buf.dma_buf;
1435
1436 mode = MODE_10 | BANK(denali->flash_bank);
1437
1438 /* DMA is a four step process */
1439
1440 /* 1. setup transfer type and # of pages */
1441 index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
1442
1443 /* 2. set memory high address bits 23:8 */
1444 index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200);
1445
1446 /* 3. set memory low address bits 23:8 */
1447 index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300);
1448
	/* 4. interrupt when complete, burst len = 64 bytes */
1450 index_addr(denali, mode | 0x14000, 0x2400);
1451}
1452
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001453/* writes a page. user specifies type, and this function handles the
Jason Robertsce082592010-05-13 15:57:33 +01001454 configuration details. */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001455static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
Jason Robertsce082592010-05-13 15:57:33 +01001456 const uint8_t *buf, bool raw_xfer)
1457{
1458 struct denali_nand_info *denali = mtd_to_denali(mtd);
1459 struct pci_dev *pci_dev = denali->dev;
1460
1461 dma_addr_t addr = denali->buf.dma_buf;
1462 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1463
1464 uint32_t irq_status = 0;
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001465 uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
Jason Robertsce082592010-05-13 15:57:33 +01001466 INTR_STATUS0__PROGRAM_FAIL;
1467
1468 /* if it is a raw xfer, we want to disable ecc, and send
1469 * the spare area.
1470 * !raw_xfer - enable ecc
1471 * raw_xfer - transfer spare
1472 */
1473 setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
1474
1475 /* copy buffer into DMA buffer */
1476 memcpy(denali->buf.buf, buf, mtd->writesize);
1477
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001478 if (raw_xfer) {
Jason Robertsce082592010-05-13 15:57:33 +01001479 /* transfer the data to the spare area */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001480 memcpy(denali->buf.buf + mtd->writesize,
1481 chip->oob_poi,
1482 mtd->oobsize);
Jason Robertsce082592010-05-13 15:57:33 +01001483 }
1484
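	/* hand the DMA buffer over to the device for the upcoming transfer */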
1485 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);
1486
1487 clear_interrupts(denali);
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001488 denali_enable_dma(denali, true);
Jason Robertsce082592010-05-13 15:57:33 +01001489
David Woodhouseaadff492010-05-13 16:12:43 +01001490 denali_setup_dma(denali, DENALI_WRITE);
Jason Robertsce082592010-05-13 15:57:33 +01001491
1492 /* wait for operation to complete */
1493 irq_status = wait_for_irq(denali, irq_mask);
1494
	if (irq_status == 0) {
		printk(KERN_ERR "timeout on write_page (type = %d)\n",
				raw_xfer);
		denali->status = NAND_STATUS_FAIL;
	}
1502
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001503 denali_enable_dma(denali, false);
Jason Robertsce082592010-05-13 15:57:33 +01001504 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
1505}
1506
1507/* NAND core entry points */
1508
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001509/* this is the callback that the NAND core calls to write a page. Since
1510 writing a page with ECC or without is similar, all the work is done
Jason Robertsce082592010-05-13 15:57:33 +01001511 by write_page above. */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001512static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
Jason Robertsce082592010-05-13 15:57:33 +01001513 const uint8_t *buf)
1514{
1515 /* for regular page writes, we let HW handle all the ECC
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001516 * data written to the device. */
Jason Robertsce082592010-05-13 15:57:33 +01001517 write_page(mtd, chip, buf, false);
1518}
1519
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001520/* This is the callback that the NAND core calls to write a page without ECC.
 raw access is similar to ECC page writes, so all the work is done in the
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001522 write_page() function above.
Jason Robertsce082592010-05-13 15:57:33 +01001523 */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001524static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
Jason Robertsce082592010-05-13 15:57:33 +01001525 const uint8_t *buf)
1526{
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001527 /* for raw page writes, we want to disable ECC and simply write
Jason Robertsce082592010-05-13 15:57:33 +01001528 whatever data is in the buffer. */
1529 write_page(mtd, chip, buf, true);
1530}
1531
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001532static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
Jason Robertsce082592010-05-13 15:57:33 +01001533 int page)
1534{
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001535 return write_oob_data(mtd, chip->oob_poi, page);
Jason Robertsce082592010-05-13 15:57:33 +01001536}
1537
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001538static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
Jason Robertsce082592010-05-13 15:57:33 +01001539 int page, int sndcmd)
1540{
1541 read_oob_data(mtd, chip->oob_poi, page);
1542
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001543 return 0; /* notify NAND core to send command to
1544 NAND device. */
Jason Robertsce082592010-05-13 15:57:33 +01001545}
1546
1547static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1548 uint8_t *buf, int page)
1549{
1550 struct denali_nand_info *denali = mtd_to_denali(mtd);
1551 struct pci_dev *pci_dev = denali->dev;
1552
1553 dma_addr_t addr = denali->buf.dma_buf;
1554 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1555
1556 uint32_t irq_status = 0;
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001557 uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
Jason Robertsce082592010-05-13 15:57:33 +01001558 INTR_STATUS0__ECC_ERR;
1559 bool check_erased_page = false;
1560
1561 setup_ecc_for_xfer(denali, true, false);
1562
David Woodhouseaadff492010-05-13 16:12:43 +01001563 denali_enable_dma(denali, true);
Jason Robertsce082592010-05-13 15:57:33 +01001564 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
1565
1566 clear_interrupts(denali);
David Woodhouseaadff492010-05-13 16:12:43 +01001567 denali_setup_dma(denali, DENALI_READ);
Jason Robertsce082592010-05-13 15:57:33 +01001568
1569 /* wait for operation to complete */
1570 irq_status = wait_for_irq(denali, irq_mask);
1571
1572 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
1573
1574 memcpy(buf, denali->buf.buf, mtd->writesize);
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001575
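	/* let the ECC handler fix correctable errors in place; it tells us
	 * whether an uncorrectable error might just be an erased page */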
Jason Robertsce082592010-05-13 15:57:33 +01001576 check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status);
David Woodhouseaadff492010-05-13 16:12:43 +01001577 denali_enable_dma(denali, false);
Jason Robertsce082592010-05-13 15:57:33 +01001578
	if (check_erased_page) {
		read_oob_data(&denali->mtd, chip->oob_poi, denali->page);

		/* check ECC failures that may have occurred on erased pages */
		if (!is_erased(buf, denali->mtd.writesize))
			denali->mtd.ecc_stats.failed++;
		if (!is_erased(chip->oob_poi, denali->mtd.oobsize))
			denali->mtd.ecc_stats.failed++;
	}
1590 return 0;
1591}
1592
1593static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1594 uint8_t *buf, int page)
1595{
1596 struct denali_nand_info *denali = mtd_to_denali(mtd);
1597 struct pci_dev *pci_dev = denali->dev;
1598
1599 dma_addr_t addr = denali->buf.dma_buf;
1600 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1601
1602 uint32_t irq_status = 0;
1603 uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001604
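	/* raw read: ECC is disabled, so only wait for the DMA to complete */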
Jason Robertsce082592010-05-13 15:57:33 +01001605 setup_ecc_for_xfer(denali, false, true);
David Woodhouseaadff492010-05-13 16:12:43 +01001606 denali_enable_dma(denali, true);
Jason Robertsce082592010-05-13 15:57:33 +01001607
1608 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
1609
1610 clear_interrupts(denali);
David Woodhouseaadff492010-05-13 16:12:43 +01001611 denali_setup_dma(denali, DENALI_READ);
Jason Robertsce082592010-05-13 15:57:33 +01001612
1613 /* wait for operation to complete */
1614 irq_status = wait_for_irq(denali, irq_mask);
1615
1616 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
1617
David Woodhouseaadff492010-05-13 16:12:43 +01001618 denali_enable_dma(denali, false);
Jason Robertsce082592010-05-13 15:57:33 +01001619
1620 memcpy(buf, denali->buf.buf, mtd->writesize);
1621 memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
1622
1623 return 0;
1624}
1625
1626static uint8_t denali_read_byte(struct mtd_info *mtd)
1627{
1628 struct denali_nand_info *denali = mtd_to_denali(mtd);
1629 uint8_t result = 0xff;
1630
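	/* drain the byte buffer that denali_cmdfunc filled (e.g. READ ID) */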
1631 if (denali->buf.head < denali->buf.tail)
Jason Robertsce082592010-05-13 15:57:33 +01001632 result = denali->buf.buf[denali->buf.head++];
Jason Robertsce082592010-05-13 15:57:33 +01001633
1634#if DEBUG_DENALI
Chuanxiao Dongbf1806d2010-07-27 10:48:34 +08001635 printk(KERN_INFO "read byte -> 0x%02x\n", result);
Jason Robertsce082592010-05-13 15:57:33 +01001636#endif
1637 return result;
1638}
1639
1640static void denali_select_chip(struct mtd_info *mtd, int chip)
1641{
1642 struct denali_nand_info *denali = mtd_to_denali(mtd);
1643#if DEBUG_DENALI
Chuanxiao Dongbf1806d2010-07-27 10:48:34 +08001644 printk(KERN_INFO "denali select chip %d\n", chip);
Jason Robertsce082592010-05-13 15:57:33 +01001645#endif
1646 spin_lock_irq(&denali->irq_lock);
1647 denali->flash_bank = chip;
1648 spin_unlock_irq(&denali->irq_lock);
1649}
1650
1651static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
1652{
1653 struct denali_nand_info *denali = mtd_to_denali(mtd);
1654 int status = denali->status;
1655 denali->status = 0;
1656
1657#if DEBUG_DENALI
Chuanxiao Dongbf1806d2010-07-27 10:48:34 +08001658 printk(KERN_INFO "waitfunc %d\n", status);
Jason Robertsce082592010-05-13 15:57:33 +01001659#endif
1660 return status;
1661}
1662
1663static void denali_erase(struct mtd_info *mtd, int page)
1664{
1665 struct denali_nand_info *denali = mtd_to_denali(mtd);
1666
1667 uint32_t cmd = 0x0, irq_status = 0;
1668
1669#if DEBUG_DENALI
Chuanxiao Dongbf1806d2010-07-27 10:48:34 +08001670 printk(KERN_INFO "erase page: %d\n", page);
Jason Robertsce082592010-05-13 15:57:33 +01001671#endif
1672 /* clear interrupts */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001673 clear_interrupts(denali);
Jason Robertsce082592010-05-13 15:57:33 +01001674
	/* set up the erase command for this bank and page */
1676 cmd = MODE_10 | BANK(denali->flash_bank) | page;
1677 index_addr(denali, (uint32_t)cmd, 0x1);
1678
1679 /* wait for erase to complete or failure to occur */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001680 irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
Jason Robertsce082592010-05-13 15:57:33 +01001681 INTR_STATUS0__ERASE_FAIL);
1682
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +08001683 denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ?
1684 NAND_STATUS_FAIL : PASS;
Jason Robertsce082592010-05-13 15:57:33 +01001685}
1686
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001687static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
Jason Robertsce082592010-05-13 15:57:33 +01001688 int page)
1689{
1690 struct denali_nand_info *denali = mtd_to_denali(mtd);
Chuanxiao Dongef41e1b2010-08-06 00:48:49 +08001691 uint32_t addr, id;
1692 int i;
Jason Robertsce082592010-05-13 15:57:33 +01001693
1694#if DEBUG_DENALI
Chuanxiao Dongbf1806d2010-07-27 10:48:34 +08001695 printk(KERN_INFO "cmdfunc: 0x%x %d %d\n", cmd, col, page);
Jason Robertsce082592010-05-13 15:57:33 +01001696#endif
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001697 switch (cmd) {
Chuanxiao Donga99d1792010-07-27 11:32:21 +08001698 case NAND_CMD_PAGEPROG:
1699 break;
1700 case NAND_CMD_STATUS:
1701 read_status(denali);
1702 break;
1703 case NAND_CMD_READID:
1704 reset_buf(denali);
		/* sometimes the manufacturer ID read from the register is
		 * not right, e.g. on some Micron MT29F32G08QAA MLC NAND
		 * chips, so we send the READ ID command to the NAND
		 * device instead.
		 */
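		/* the low bits of the MODE_11 address select the cycle type:
		 *   addr | 0 issues the 0x90 (READ ID) command,
		 *   addr | 1 sends the 0x00 address cycle,
		 *   addr | 2 then clocks out the five ID bytes.
		 */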
1709 addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
1710 index_addr(denali, (uint32_t)addr | 0, 0x90);
1711 index_addr(denali, (uint32_t)addr | 1, 0);
1712 for (i = 0; i < 5; i++) {
1713 index_addr_read_data(denali,
1714 (uint32_t)addr | 2,
1715 &id);
1716 write_byte_to_buf(denali, id);
Chuanxiao Donga99d1792010-07-27 11:32:21 +08001717 }
1718 break;
1719 case NAND_CMD_READ0:
1720 case NAND_CMD_SEQIN:
1721 denali->page = page;
1722 break;
1723 case NAND_CMD_RESET:
1724 reset_bank(denali);
1725 break;
1726 case NAND_CMD_READOOB:
1727 /* TODO: Read OOB data */
1728 break;
1729 default:
		printk(KERN_ERR "unsupported command received 0x%x\n", cmd);
1732 break;
Jason Robertsce082592010-05-13 15:57:33 +01001733 }
1734}
1735
1736/* stubs for ECC functions not used by the NAND core */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001737static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
Jason Robertsce082592010-05-13 15:57:33 +01001738 uint8_t *ecc_code)
1739{
1740 printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n");
1741 BUG();
1742 return -EIO;
1743}
1744
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001745static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
Jason Robertsce082592010-05-13 15:57:33 +01001746 uint8_t *read_ecc, uint8_t *calc_ecc)
1747{
1748 printk(KERN_ERR "denali_ecc_correct called unexpectedly\n");
1749 BUG();
1750 return -EIO;
1751}
1752
1753static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
1754{
1755 printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n");
1756 BUG();
1757}
1758/* end NAND core entry points */
1759
1760/* Initialization code to bring the device up to a known good state */
1761static void denali_hw_init(struct denali_nand_info *denali)
1762{
1763 denali_irq_init(denali);
Chuanxiao Dongeda936e2010-07-27 14:17:37 +08001764 denali_nand_reset(denali);
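	/* 0x0F appears to enable the ready/busy pin for each of the four
	 * possible banks */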
Jason Robertsce082592010-05-13 15:57:33 +01001765 denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +08001766 denali_write32(CHIP_EN_DONT_CARE__FLAG,
1767 denali->flash_reg + CHIP_ENABLE_DONT_CARE);
Jason Robertsce082592010-05-13 15:57:33 +01001768
1769 denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES);
1770 denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
1771
1772 /* Should set value for these registers when init */
1773 denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
1774 denali_write32(1, denali->flash_reg + ECC_ENABLE);
1775}
1776
1777/* ECC layout for SLC devices. Denali spec indicates SLC fixed at 4 bytes */
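/* note: these layouts hard-code a 2048 byte page with a 64 byte OOB area */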
Chuanxiao Donga99d1792010-07-27 11:32:21 +08001778#define ECC_BYTES_SLC (4 * (2048 / ECC_SECTOR_SIZE))
Jason Robertsce082592010-05-13 15:57:33 +01001779static struct nand_ecclayout nand_oob_slc = {
1780 .eccbytes = 4,
1781 .eccpos = { 0, 1, 2, 3 }, /* not used */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001782 .oobfree = {
1783 {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001784 .offset = ECC_BYTES_SLC,
1785 .length = 64 - ECC_BYTES_SLC
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001786 }
1787 }
Jason Robertsce082592010-05-13 15:57:33 +01001788};
1789
Chuanxiao Donga99d1792010-07-27 11:32:21 +08001790#define ECC_BYTES_MLC (14 * (2048 / ECC_SECTOR_SIZE))
Jason Robertsce082592010-05-13 15:57:33 +01001791static struct nand_ecclayout nand_oob_mlc_14bit = {
1792 .eccbytes = 14,
	.eccbytes = 14,
	.eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001794 .oobfree = {
1795 {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001796 .offset = ECC_BYTES_MLC,
1797 .length = 64 - ECC_BYTES_MLC
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001798 }
1799 }
Jason Robertsce082592010-05-13 15:57:33 +01001800};
1801
1802static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
1803static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
1804
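/* bad block table descriptors: the BBT and its mirror live in the last
 * blocks of each chip, tagged by the patterns above at offset 8 with a
 * version byte at offset 12 */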
1805static struct nand_bbt_descr bbt_main_descr = {
1806 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1807 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
1808 .offs = 8,
1809 .len = 4,
1810 .veroffs = 12,
1811 .maxblocks = 4,
1812 .pattern = bbt_pattern,
1813};
1814
1815static struct nand_bbt_descr bbt_mirror_descr = {
1816 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1817 | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
1818 .offs = 8,
1819 .len = 4,
1820 .veroffs = 12,
1821 .maxblocks = 4,
1822 .pattern = mirror_pattern,
1823};
1824
/* initialize driver data structures */
1826void denali_drv_init(struct denali_nand_info *denali)
1827{
1828 denali->idx = 0;
1829
1830 /* setup interrupt handler */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001831 /* the completion object will be used to notify
Jason Robertsce082592010-05-13 15:57:33 +01001832 * the callee that the interrupt is done */
1833 init_completion(&denali->complete);
1834
	/* the spinlock will be used to synchronize the ISR
	 * with any element that might access shared
	 * data (interrupt status) */
1838 spin_lock_init(&denali->irq_lock);
1839
1840 /* indicate that MTD has not selected a valid bank yet */
1841 denali->flash_bank = CHIP_SELECT_INVALID;
1842
1843 /* initialize our irq_status variable to indicate no interrupts */
1844 denali->irq_status = 0;
1845}
1846
1847/* driver entry point */
1848static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1849{
1850 int ret = -ENODEV;
1851 resource_size_t csr_base, mem_base;
1852 unsigned long csr_len, mem_len;
1853 struct denali_nand_info *denali;
1854
1855 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1856 __FILE__, __LINE__, __func__);
1857
1858 denali = kzalloc(sizeof(*denali), GFP_KERNEL);
1859 if (!denali)
1860 return -ENOMEM;
1861
1862 ret = pci_enable_device(dev);
1863 if (ret) {
1864 printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
1865 goto failed_enable;
1866 }
1867
1868 if (id->driver_data == INTEL_CE4100) {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001869 /* Due to a silicon limitation, we can only support
1870 * ONFI timing mode 1 and below.
1871 */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001872 if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +08001873 printk(KERN_ERR "Intel CE4100 only supports"
1874 " ONFI timing mode 1 or below\n");
Jason Robertsce082592010-05-13 15:57:33 +01001875 ret = -EINVAL;
1876 goto failed_enable;
1877 }
1878 denali->platform = INTEL_CE4100;
1879 mem_base = pci_resource_start(dev, 0);
		mem_len = pci_resource_len(dev, 0);
1881 csr_base = pci_resource_start(dev, 1);
1882 csr_len = pci_resource_len(dev, 1);
1883 } else {
1884 denali->platform = INTEL_MRST;
1885 csr_base = pci_resource_start(dev, 0);
		csr_len = pci_resource_len(dev, 0);
1887 mem_base = pci_resource_start(dev, 1);
1888 mem_len = pci_resource_len(dev, 1);
1889 if (!mem_len) {
1890 mem_base = csr_base + csr_len;
1891 mem_len = csr_len;
1892 nand_dbg_print(NAND_DBG_WARN,
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +08001893 "Spectra: No second"
1894 " BAR for PCI device;"
1895 " assuming %08Lx\n",
Jason Robertsce082592010-05-13 15:57:33 +01001896 (uint64_t)csr_base);
1897 }
1898 }
1899
1900 /* Is 32-bit DMA supported? */
1901 ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
1902
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001903 if (ret) {
Jason Robertsce082592010-05-13 15:57:33 +01001904 printk(KERN_ERR "Spectra: no usable DMA configuration\n");
1905 goto failed_enable;
1906 }
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +08001907 denali->buf.dma_buf =
1908 pci_map_single(dev, denali->buf.buf,
1909 DENALI_BUF_SIZE,
1910 PCI_DMA_BIDIRECTIONAL);
Jason Robertsce082592010-05-13 15:57:33 +01001911
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001912 if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) {
Jason Robertsce082592010-05-13 15:57:33 +01001913 printk(KERN_ERR "Spectra: failed to map DMA buffer\n");
1914 goto failed_enable;
1915 }
1916
1917 pci_set_master(dev);
1918 denali->dev = dev;
1919
1920 ret = pci_request_regions(dev, DENALI_NAND_NAME);
1921 if (ret) {
1922 printk(KERN_ERR "Spectra: Unable to request memory regions\n");
1923 goto failed_req_csr;
1924 }
1925
1926 denali->flash_reg = ioremap_nocache(csr_base, csr_len);
1927 if (!denali->flash_reg) {
1928 printk(KERN_ERR "Spectra: Unable to remap memory region\n");
1929 ret = -ENOMEM;
1930 goto failed_remap_csr;
1931 }
1932 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n",
1933 (uint64_t)csr_base, denali->flash_reg, csr_len);
1934
1935 denali->flash_mem = ioremap_nocache(mem_base, mem_len);
1936 if (!denali->flash_mem) {
		printk(KERN_ERR "Spectra: ioremap_nocache failed!\n");
1938 iounmap(denali->flash_reg);
1939 ret = -ENOMEM;
1940 goto failed_remap_csr;
1941 }
1942
1943 nand_dbg_print(NAND_DBG_WARN,
1944 "Spectra: Remapped flash base address: "
1945 "0x%p, len: %ld\n",
			denali->flash_mem, mem_len);
1947
1948 denali_hw_init(denali);
1949 denali_drv_init(denali);
1950
1951 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
1952 if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
1953 DENALI_NAND_NAME, denali)) {
1954 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
1955 ret = -ENODEV;
1956 goto failed_request_irq;
1957 }
1958
1959 /* now that our ISR is registered, we can enable interrupts */
Chuanxiao Dongeda936e2010-07-27 14:17:37 +08001960 denali_set_intr_modes(denali, true);
Jason Robertsce082592010-05-13 15:57:33 +01001961
1962 pci_set_drvdata(dev, denali);
1963
Chuanxiao Dongeda936e2010-07-27 14:17:37 +08001964 denali_nand_timing_set(denali);
Jason Robertsce082592010-05-13 15:57:33 +01001965
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001966 /* MTD supported page sizes vary by kernel. We validate our
1967 * kernel supports the device here.
Jason Robertsce082592010-05-13 15:57:33 +01001968 */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001969 if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
Jason Robertsce082592010-05-13 15:57:33 +01001970 ret = -ENODEV;
		printk(KERN_ERR "Spectra: device size not supported by this "
				"version of MTD.\n");
1973 goto failed_nand;
1974 }
1975
1976 nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
1977 "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
1978 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
1979 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
1980 ioread32(denali->flash_reg + ACC_CLKS),
1981 ioread32(denali->flash_reg + RE_2_WE),
1982 ioread32(denali->flash_reg + WE_2_RE),
1983 ioread32(denali->flash_reg + ADDR_2_DATA),
1984 ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
1985 ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
1986 ioread32(denali->flash_reg + CS_SETUP_CNT));
1987
1988 denali->mtd.name = "Denali NAND";
1989 denali->mtd.owner = THIS_MODULE;
1990 denali->mtd.priv = &denali->nand;
1991
1992 /* register the driver with the NAND core subsystem */
1993 denali->nand.select_chip = denali_select_chip;
1994 denali->nand.cmdfunc = denali_cmdfunc;
1995 denali->nand.read_byte = denali_read_byte;
1996 denali->nand.waitfunc = denali_waitfunc;
1997
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001998 /* scan for NAND devices attached to the controller
Jason Robertsce082592010-05-13 15:57:33 +01001999 * this is the first stage in a two step process to register
Chuanxiao5bac3ac2010-08-05 23:06:04 +08002000 * with the nand subsystem */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08002001 if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) {
Jason Robertsce082592010-05-13 15:57:33 +01002002 ret = -ENXIO;
2003 goto failed_nand;
2004 }
Chuanxiao5bac3ac2010-08-05 23:06:04 +08002005
2006 /* second stage of the NAND scan
2007 * this stage requires information regarding ECC and
2008 * bad block management. */
Jason Robertsce082592010-05-13 15:57:33 +01002009
2010 /* Bad block management */
2011 denali->nand.bbt_td = &bbt_main_descr;
2012 denali->nand.bbt_md = &bbt_mirror_descr;
2013
2014 /* skip the scan for now until we have OOB read and write support */
2015 denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
2016 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
2017
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08002018 if (denali->dev_info.MLCDevice) {
Jason Robertsce082592010-05-13 15:57:33 +01002019 denali->nand.ecc.layout = &nand_oob_mlc_14bit;
2020 denali->nand.ecc.bytes = ECC_BYTES_MLC;
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08002021 } else {/* SLC */
Jason Robertsce082592010-05-13 15:57:33 +01002022 denali->nand.ecc.layout = &nand_oob_slc;
2023 denali->nand.ecc.bytes = ECC_BYTES_SLC;
2024 }
2025
Chuanxiao5bac3ac2010-08-05 23:06:04 +08002026 /* These functions are required by the NAND core framework, otherwise,
2027 * the NAND core will assert. However, we don't need them, so we'll stub
2028 * them out. */
Jason Robertsce082592010-05-13 15:57:33 +01002029 denali->nand.ecc.calculate = denali_ecc_calculate;
2030 denali->nand.ecc.correct = denali_ecc_correct;
2031 denali->nand.ecc.hwctl = denali_ecc_hwctl;
2032
2033 /* override the default read operations */
2034 denali->nand.ecc.size = denali->mtd.writesize;
2035 denali->nand.ecc.read_page = denali_read_page;
2036 denali->nand.ecc.read_page_raw = denali_read_page_raw;
2037 denali->nand.ecc.write_page = denali_write_page;
2038 denali->nand.ecc.write_page_raw = denali_write_page_raw;
2039 denali->nand.ecc.read_oob = denali_read_oob;
2040 denali->nand.ecc.write_oob = denali_write_oob;
2041 denali->nand.erase_cmd = denali_erase;
2042
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08002043 if (nand_scan_tail(&denali->mtd)) {
Jason Robertsce082592010-05-13 15:57:33 +01002044 ret = -ENXIO;
2045 goto failed_nand;
2046 }
2047
2048 ret = add_mtd_device(&denali->mtd);
2049 if (ret) {
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +08002050 printk(KERN_ERR "Spectra: Failed to register"
2051 " MTD device: %d\n", ret);
Jason Robertsce082592010-05-13 15:57:33 +01002052 goto failed_nand;
2053 }
2054 return 0;
2055
2056 failed_nand:
2057 denali_irq_cleanup(dev->irq, denali);
2058 failed_request_irq:
2059 iounmap(denali->flash_reg);
2060 iounmap(denali->flash_mem);
2061 failed_remap_csr:
2062 pci_release_regions(dev);
2063 failed_req_csr:
Chuanxiao5bac3ac2010-08-05 23:06:04 +08002064 pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
Jason Robertsce082592010-05-13 15:57:33 +01002065 PCI_DMA_BIDIRECTIONAL);
2066 failed_enable:
2067 kfree(denali);
2068 return ret;
2069}
2070
2071/* driver exit point */
2072static void denali_pci_remove(struct pci_dev *dev)
2073{
2074 struct denali_nand_info *denali = pci_get_drvdata(dev);
2075
2076 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2077 __FILE__, __LINE__, __func__);
2078
2079 nand_release(&denali->mtd);
2080 del_mtd_device(&denali->mtd);
2081
2082 denali_irq_cleanup(dev->irq, denali);
2083
2084 iounmap(denali->flash_reg);
2085 iounmap(denali->flash_mem);
2086 pci_release_regions(dev);
2087 pci_disable_device(dev);
Chuanxiao5bac3ac2010-08-05 23:06:04 +08002088 pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
Jason Robertsce082592010-05-13 15:57:33 +01002089 PCI_DMA_BIDIRECTIONAL);
2090 pci_set_drvdata(dev, NULL);
2091 kfree(denali);
2092}
2093
2094MODULE_DEVICE_TABLE(pci, denali_pci_ids);
2095
2096static struct pci_driver denali_pci_driver = {
2097 .name = DENALI_NAND_NAME,
2098 .id_table = denali_pci_ids,
2099 .probe = denali_pci_probe,
2100 .remove = denali_pci_remove,
2101};
2102
static int __init denali_init(void)
2104{
Chuanxiao Dongbdca6da2010-07-27 11:28:09 +08002105 printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n",
2106 __DATE__, __TIME__);
Jason Robertsce082592010-05-13 15:57:33 +01002107 return pci_register_driver(&denali_pci_driver);
2108}
2109
2110/* Free memory */
static void __exit denali_exit(void)
2112{
2113 pci_unregister_driver(&denali_pci_driver);
2114}
2115
2116module_init(denali_init);
2117module_exit(denali_exit);