/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
19
20#include <linux/interrupt.h>
21#include <linux/delay.h>
22#include <linux/wait.h>
23#include <linux/mutex.h>
24#include <linux/pci.h>
25#include <linux/mtd/mtd.h>
26#include <linux/module.h>
27
28#include "denali.h"
29
MODULE_LICENSE("GPL");

/* We define a module parameter that allows the user to override
 * the hardware and decide what timing mode should be used.
 */
#define NAND_DEFAULT_TIMINGS	-1

/* -1 means "keep whatever ONFI timing mode the device probe selects" */
static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
module_param(onfi_timing_mode, int, S_IRUGO);
MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates"
		" use default timings");

#define DENALI_NAND_NAME    "denali-nand"

/* We define a macro here that combines all interrupts this driver uses into
 * a single constant value, for convenience. */
#define DENALI_IRQ_ALL	(INTR_STATUS0__DMA_CMD_COMP | \
			INTR_STATUS0__ECC_TRANSACTION_DONE | \
			INTR_STATUS0__ECC_ERR | \
			INTR_STATUS0__PROGRAM_FAIL | \
			INTR_STATUS0__LOAD_COMP | \
			INTR_STATUS0__PROGRAM_COMP | \
			INTR_STATUS0__TIME_OUT | \
			INTR_STATUS0__ERASE_FAIL | \
			INTR_STATUS0__RST_COMP | \
			INTR_STATUS0__ERASE_COMP)

/* indicates whether or not the internal value for the flash bank is
   valid or not */
#define CHIP_SELECT_INVALID	-1

#define SUPPORT_8BITECC		1

/* This macro divides two integers and rounds fractional values up
 * to the nearest integer value. */
#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))

/* this macro allows us to convert from an MTD structure to our own
 * device context (denali) structure.
 */
#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)

/* These constants are defined by the driver to enable common driver
   configuration options.  They select which of main/spare areas a
   controller transaction touches. */
#define SPARE_ACCESS		0x41
#define MAIN_ACCESS		0x42
#define MAIN_SPARE_ACCESS	0x43

/* direction flags for controller transactions */
#define DENALI_READ	0
#define DENALI_WRITE	0x100

/* types of device accesses. We can issue commands and get status */
#define COMMAND_CYCLE	0
#define ADDR_CYCLE	1
#define STATUS_CYCLE	2

/* this is a helper macro that allows us to
 * format the bank into the proper bits for the controller */
#define BANK(x) ((x) << 24)
89
/* List of platforms this NAND controller has be integrated into */
static const struct pci_device_id denali_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
	{ PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
	{ /* end: all zeroes */ }
};
96
97
/* these are static lookup tables that give us easy access to
   registers in the NAND controller.  Each table is indexed by the
   flash bank number (0-3). */
static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
						  INTR_STATUS1,
						  INTR_STATUS2,
						  INTR_STATUS3};

static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
					       DEVICE_RESET__BANK1,
					       DEVICE_RESET__BANK2,
					       DEVICE_RESET__BANK3};

static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
					      INTR_STATUS1__TIME_OUT,
					      INTR_STATUS2__TIME_OUT,
					      INTR_STATUS3__TIME_OUT};

static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
					   INTR_STATUS1__RST_COMP,
					   INTR_STATUS2__RST_COMP,
					   INTR_STATUS3__RST_COMP};
Jason Robertsce082592010-05-13 15:57:33 +0100120
/* specifies the debug level of the driver */
static int nand_debug_level = 0;

/* forward declarations */
static void clear_interrupts(struct denali_nand_info *denali);
static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask);
static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask);
static uint32_t read_interrupt_status(struct denali_nand_info *denali);

/* set to 1 to trace every register write and status read via printk */
#define DEBUG_DENALI 0
131
132/* This is a wrapper for writing to the denali registers.
133 * this allows us to create debug information so we can
Chuanxiao5bac3ac2010-08-05 23:06:04 +0800134 * observe how the driver is programming the device.
Jason Robertsce082592010-05-13 15:57:33 +0100135 * it uses standard linux convention for (val, addr) */
136static void denali_write32(uint32_t value, void *addr)
137{
Chuanxiao5bac3ac2010-08-05 23:06:04 +0800138 iowrite32(value, addr);
Jason Robertsce082592010-05-13 15:57:33 +0100139
140#if DEBUG_DENALI
141 printk(KERN_ERR "wrote: 0x%x -> 0x%x\n", value, (uint32_t)((uint32_t)addr & 0x1fff));
142#endif
Chuanxiao5bac3ac2010-08-05 23:06:04 +0800143}
Jason Robertsce082592010-05-13 15:57:33 +0100144
/* Certain operations for the denali NAND controller use an indexed mode to read/write
   data. The operation is performed by writing the address value of the command to
   the device memory followed by the data. This function abstracts this common
   operation.
*/
static void index_addr(struct denali_nand_info *denali, uint32_t address, uint32_t data)
{
	/* first cycle: command/address word at the base of the mapped window */
	denali_write32(address, denali->flash_mem);
	/* second cycle: associated data word at window offset 0x10 */
	denali_write32(data, denali->flash_mem + 0x10);
}
155
/* Perform an indexed read of the device: write the command/address word,
 * then read the resulting data word back through *pdata. */
static void index_addr_read_data(struct denali_nand_info *denali,
				 uint32_t address, uint32_t *pdata)
{
	denali_write32(address, denali->flash_mem);
	*pdata = ioread32(denali->flash_mem + 0x10);
}
163
/* We need to buffer some data for some of the NAND core routines.
 * The operations manage buffering that data. */

/* discard any buffered bytes by resetting head and tail indices */
static void reset_buf(struct denali_nand_info *denali)
{
	denali->buf.head = denali->buf.tail = 0;
}
170
/* append one byte at the tail of the driver's staging buffer;
 * BUG_ON guards against overrunning the fixed-size buf array */
static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
{
	BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
	denali->buf.buf[denali->buf.tail++] = byte;
}
176
/* reads the status of the device into the driver's staging buffer */
static void read_status(struct denali_nand_info *denali)
{
	uint32_t cmd = 0x0;

	/* initialize the data buffer to store status */
	reset_buf(denali);

	/* initiate a device status read: 0x70 is the standard NAND
	 * READ STATUS opcode, issued in MODE_11 to the current bank */
	cmd = MODE_11 | BANK(denali->flash_bank);
	index_addr(denali, cmd | COMMAND_CYCLE, 0x70);
	denali_write32(cmd | STATUS_CYCLE, denali->flash_mem);

	/* update buffer with status value read back from the data window */
	write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10));

#if DEBUG_DENALI
	printk("device reporting status value of 0x%2x\n", denali->buf.buf[0]);
#endif
}
197
/* resets a specific device connected to the core: issues the bank's
 * reset bit and waits (interrupt-driven) for either reset-complete or
 * the bank's timeout interrupt */
static void reset_bank(struct denali_nand_info *denali)
{
	uint32_t irq_status = 0;
	/* wake on either completion or timeout for the current bank */
	uint32_t irq_mask = reset_complete[denali->flash_bank] |
			    operation_timeout[denali->flash_bank];
	int bank = 0;

	/* drop any stale interrupt state before triggering the reset */
	clear_interrupts(denali);

	bank = device_reset_banks[denali->flash_bank];
	denali_write32(bank, denali->flash_reg + DEVICE_RESET);

	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status & operation_timeout[denali->flash_bank])
		printk(KERN_ERR "reset bank failed.\n");
}
216
/* Reset the flash controller: resets every bank in turn, busy-polling
 * the bank's interrupt status register until reset-complete or timeout
 * is flagged, then clears those status bits again.  Always returns PASS;
 * a per-bank timeout is only logged.
 *
 * NOTE(review): the polling loops have no iteration bound - if the
 * hardware never raises RST_COMP or TIME_OUT this spins forever.
 * Confirm the controller guarantees one of the two bits. */
static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali)
{
	uint32_t i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	/* pre-clear any stale completion/timeout bits on all banks */
	for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
		denali_write32(reset_complete[i] | operation_timeout[i],
		denali->flash_reg + intr_status_addresses[i]);

	for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
		denali_write32(device_reset_banks[i], denali->flash_reg + DEVICE_RESET);
		/* busy-wait until the bank reports completion or timeout */
		while (!(ioread32(denali->flash_reg + intr_status_addresses[i]) &
			(reset_complete[i] | operation_timeout[i])))
			;
		if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
			operation_timeout[i])
			nand_dbg_print(NAND_DBG_WARN,
			"NAND Reset operation timed out on bank %d\n", i);
	}

	/* write-1-to-clear the status bits we just consumed */
	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
		denali_write32(reset_complete[i] | operation_timeout[i],
			denali->flash_reg + intr_status_addresses[i]);

	return PASS;
}
246
247/* this routine calculates the ONFI timing values for a given mode and programs
248 * the clocking register accordingly. The mode is determined by the get_onfi_nand_para
249 routine.
250 */
251static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode)
252{
253 uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
254 uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
255 uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
256 uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
257 uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
258 uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
259 uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
260 uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
261 uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
262 uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
263 uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
264 uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
265
266 uint16_t TclsRising = 1;
267 uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
268 uint16_t dv_window = 0;
269 uint16_t en_lo, en_hi;
270 uint16_t acc_clks;
271 uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
272
273 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
274 __FILE__, __LINE__, __func__);
275
276 en_lo = CEIL_DIV(Trp[mode], CLK_X);
277 en_hi = CEIL_DIV(Treh[mode], CLK_X);
278#if ONFI_BLOOM_TIME
279 if ((en_hi * CLK_X) < (Treh[mode] + 2))
280 en_hi++;
281#endif
282
283 if ((en_lo + en_hi) * CLK_X < Trc[mode])
284 en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
285
286 if ((en_lo + en_hi) < CLK_MULTI)
287 en_lo += CLK_MULTI - en_lo - en_hi;
288
289 while (dv_window < 8) {
290 data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
291
292 data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
293
294 data_invalid =
295 data_invalid_rhoh <
296 data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
297
298 dv_window = data_invalid - Trea[mode];
299
300 if (dv_window < 8)
301 en_lo++;
302 }
303
304 acc_clks = CEIL_DIV(Trea[mode], CLK_X);
305
306 while (((acc_clks * CLK_X) - Trea[mode]) < 3)
307 acc_clks++;
308
309 if ((data_invalid - acc_clks * CLK_X) < 2)
310 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
311 __FILE__, __LINE__);
312
313 addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
314 re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
315 re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
316 we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
317 cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
318 if (!TclsRising)
319 cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
320 if (cs_cnt == 0)
321 cs_cnt = 1;
322
323 if (Tcea[mode]) {
324 while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
325 cs_cnt++;
326 }
327
328#if MODE5_WORKAROUND
329 if (mode == 5)
330 acc_clks = 5;
331#endif
332
333 /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
334 if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) &&
335 (ioread32(denali->flash_reg + DEVICE_ID) == 0x88))
336 acc_clks = 6;
337
338 denali_write32(acc_clks, denali->flash_reg + ACC_CLKS);
339 denali_write32(re_2_we, denali->flash_reg + RE_2_WE);
340 denali_write32(re_2_re, denali->flash_reg + RE_2_RE);
341 denali_write32(we_2_re, denali->flash_reg + WE_2_RE);
342 denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
343 denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
344 denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
345 denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
346}
347
/* configures the initial ECC settings for the controller.
 *
 * Derives wECCBytesPerSector from the controller's ECC_CORRECTION
 * register (4 bytes for 1-bit correction, otherwise value*13/8 rounded
 * up to the next even count), scales it by the number of connected
 * devices, and computes the leftover spare bytes (wNumPageSpareFlag).
 */
static void set_ecc_config(struct denali_nand_info *denali)
{
#if SUPPORT_8BITECC
	/* small-page / small-spare devices get forced to 8-bit correction */
	if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
		(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) <= 128))
		denali_write32(8, denali->flash_reg + ECC_CORRECTION);
#endif

	if ((ioread32(denali->flash_reg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
		== 1) {
		denali->dev_info.wECCBytesPerSector = 4;
		denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
		denali->dev_info.wNumPageSpareFlag =
			denali->dev_info.wPageSpareSize -
			denali->dev_info.wPageDataSize /
			(ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
			denali->dev_info.wECCBytesPerSector
			- denali->dev_info.wSpareSkipBytes;
	} else {
		/* 13 bits of parity per correction level, packed into bytes */
		denali->dev_info.wECCBytesPerSector =
			(ioread32(denali->flash_reg + ECC_CORRECTION) &
			ECC_CORRECTION__VALUE) * 13 / 8;
		/* round up to an even byte count */
		if ((denali->dev_info.wECCBytesPerSector) % 2 == 0)
			denali->dev_info.wECCBytesPerSector += 2;
		else
			denali->dev_info.wECCBytesPerSector += 1;

		denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
		denali->dev_info.wNumPageSpareFlag = denali->dev_info.wPageSpareSize -
			denali->dev_info.wPageDataSize /
			(ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
			denali->dev_info.wECCBytesPerSector
			- denali->dev_info.wSpareSkipBytes;
	}
}
384
/* queries the NAND device to see what ONFI modes it supports.
 *
 * Resets banks 0-3 in a nested ladder (each bank is only tried if the
 * previous one completed without timeout), clears the timeout bits,
 * reads the ONFI feature/timing registers into dev_info, picks the
 * highest advertised timing mode and programs it, then reads the
 * device ID bytes to set MLCDevice.  Returns FAIL if the device
 * advertises no valid ONFI timing mode, PASS otherwise.
 *
 * NOTE(review): the reset polling loops are unbounded, like
 * NAND_Flash_Reset - they rely on the hardware always raising either
 * RST_COMP or TIME_OUT.
 */
static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
{
	int i;
	uint16_t blks_lun_l, blks_lun_h, n_of_luns;
	uint32_t blockperlun, id;

	denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET);

	while (!((ioread32(denali->flash_reg + INTR_STATUS0) &
		INTR_STATUS0__RST_COMP) |
		(ioread32(denali->flash_reg + INTR_STATUS0) &
		INTR_STATUS0__TIME_OUT)))
		;

	if (ioread32(denali->flash_reg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
		denali_write32(DEVICE_RESET__BANK1, denali->flash_reg + DEVICE_RESET);
		while (!((ioread32(denali->flash_reg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) |
			(ioread32(denali->flash_reg + INTR_STATUS1) &
			INTR_STATUS1__TIME_OUT)))
			;

		if (ioread32(denali->flash_reg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) {
			denali_write32(DEVICE_RESET__BANK2,
				denali->flash_reg + DEVICE_RESET);
			while (!((ioread32(denali->flash_reg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) |
				(ioread32(denali->flash_reg + INTR_STATUS2) &
				INTR_STATUS2__TIME_OUT)))
				;

			if (ioread32(denali->flash_reg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) {
				denali_write32(DEVICE_RESET__BANK3,
					denali->flash_reg + DEVICE_RESET);
				while (!((ioread32(denali->flash_reg + INTR_STATUS3) &
					INTR_STATUS3__RST_COMP) |
					(ioread32(denali->flash_reg + INTR_STATUS3) &
					INTR_STATUS3__TIME_OUT)))
					;
			} else {
				printk(KERN_ERR "Getting a time out for bank 2!\n");
			}
		} else {
			printk(KERN_ERR "Getting a time out for bank 1!\n");
		}
	}

	/* write-1-to-clear any timeout bits left over from the resets */
	denali_write32(INTR_STATUS0__TIME_OUT, denali->flash_reg + INTR_STATUS0);
	denali_write32(INTR_STATUS1__TIME_OUT, denali->flash_reg + INTR_STATUS1);
	denali_write32(INTR_STATUS2__TIME_OUT, denali->flash_reg + INTR_STATUS2);
	denali_write32(INTR_STATUS3__TIME_OUT, denali->flash_reg + INTR_STATUS3);

	/* capture the ONFI feature set advertised by the controller */
	denali->dev_info.wONFIDevFeatures =
		ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES);
	denali->dev_info.wONFIOptCommands =
		ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS);
	denali->dev_info.wONFITimingMode =
		ioread32(denali->flash_reg + ONFI_TIMING_MODE);
	denali->dev_info.wONFIPgmCacheTimingMode =
		ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE);

	n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
	blks_lun_l = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
	blks_lun_h = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);

	blockperlun = (blks_lun_h << 16) | blks_lun_l;

	denali->dev_info.wTotalBlocks = n_of_luns * blockperlun;

	if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
		ONFI_TIMING_MODE__VALUE))
		return FAIL;

	/* pick the highest timing mode bit the device advertises (5..1) */
	for (i = 5; i > 0; i--) {
		if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & (0x01 << i))
			break;
	}

	NAND_ONFi_Timing_Mode(denali, i);

	/* issue READ ID (0x90, address 0) and discard to the 3rd ID byte */
	index_addr(denali, MODE_11 | 0, 0x90);
	index_addr(denali, MODE_11 | 1, 0);

	for (i = 0; i < 3; i++)
		index_addr_read_data(denali, MODE_11 | 2, &id);

	nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);

	denali->dev_info.MLCDevice = id & 0x0C;

	/* By now, all the ONFI devices we know support the page cache */
	/* rw feature. So here we enable the pipeline_rw_ahead feature */
	/* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
	/* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */

	return PASS;
}
486
/* Detect Samsung NAND geometry: reads the 5 READ ID bytes, applies
 * datasheet timing values for the K9WAG08U1A part, then decodes plane
 * count / plane size from ID byte 4 and block size from DEVICE_PARAM_1
 * to compute wTotalBlocks. */
static void get_samsung_nand_para(struct denali_nand_info *denali)
{
	uint8_t no_of_planes;
	uint32_t blk_size;
	uint64_t plane_size, capacity;
	uint32_t id_bytes[5];
	int i;

	/* READ ID command (0x90) at address 0, then 5 sequential reads */
	index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90);
	index_addr(denali, (uint32_t)(MODE_11 | 1), 0);
	for (i = 0; i < 5; i++)
		index_addr_read_data(denali, (uint32_t)(MODE_11 | 2), &id_bytes[i]);

	nand_dbg_print(NAND_DBG_DEBUG,
		"ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
		id_bytes[0], id_bytes[1], id_bytes[2],
		id_bytes[3], id_bytes[4]);

	if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
		/* Set timing register values according to datasheet */
		denali_write32(5, denali->flash_reg + ACC_CLKS);
		denali_write32(20, denali->flash_reg + RE_2_WE);
		denali_write32(12, denali->flash_reg + WE_2_RE);
		denali_write32(14, denali->flash_reg + ADDR_2_DATA);
		denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT);
		denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT);
		denali_write32(2, denali->flash_reg + CS_SETUP_CNT);
	}

	/* geometry fields packed in ID byte 4 / DEVICE_PARAM_1 */
	no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
	plane_size  = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4);
	blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) & 0x30) >> 4);
	capacity = (uint64_t)128 * plane_size * no_of_planes;

	/* 64-bit divide via do_div; quotient lands in capacity */
	do_div(capacity, blk_size);
	denali->dev_info.wTotalBlocks = capacity;
}
524
/* Detect Toshiba NAND parameters.  Works around a controller bug that
 * reports the wrong spare area size for some Toshiba parts, then reads
 * the total block count from a platform scratch register (the device
 * itself cannot report it). */
static void get_toshiba_nand_para(struct denali_nand_info *denali)
{
	void __iomem *scratch_reg;
	uint32_t tmp;

	/* Workaround to fix a controller bug which reports a wrong */
	/* spare area size for some kind of Toshiba NAND device */
	if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
		(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
		denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
			ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		denali_write32(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
#if SUPPORT_15BITECC
		denali_write32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		denali_write32(8, denali->flash_reg + ECC_CORRECTION);
#endif
	}

	/* As Toshiba NAND can not provide it's block number, */
	/* so here we need user to provide the correct block */
	/* number in a scratch register before the Linux NAND */
	/* driver is loaded. If no valid value found in the scratch */
	/* register, then we use default block number value */
	scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
	if (!scratch_reg) {
		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
			__FILE__, __LINE__);
		denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	} else {
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: ioremap reg address: 0x%p\n", scratch_reg);
		/* scratch holds log2 of the block count; < 512 is invalid */
		denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
		if (denali->dev_info.wTotalBlocks < 512)
			denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
		iounmap(scratch_reg);
	}
}
564
/* Detect Hynix NAND parameters.  For the known 0xD5/0xD7 device IDs the
 * controller registers are programmed with datasheet geometry and ECC
 * values; unknown IDs keep the controller defaults.  The total block
 * count is then read from a platform scratch register, as for Toshiba. */
static void get_hynix_nand_para(struct denali_nand_info *denali)
{
	void __iomem *scratch_reg;
	uint32_t main_size, spare_size;

	switch (denali->dev_info.wDeviceID) {
	case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
	case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
		denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK);
		denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		/* logical sizes scale with the number of connected devices */
		main_size = 4096 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
		spare_size = 224 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
		denali_write32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		denali_write32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
		denali_write32(0, denali->flash_reg + DEVICE_WIDTH);
#if SUPPORT_15BITECC
		denali_write32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		denali_write32(8, denali->flash_reg + ECC_CORRECTION);
#endif
		denali->dev_info.MLCDevice = 1;
		break;
	default:
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
			"Will use default parameter values instead.\n",
			denali->dev_info.wDeviceID);
	}

	/* block count comes from the platform scratch register (log2) */
	scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
	if (!scratch_reg) {
		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
			__FILE__, __LINE__);
		denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	} else {
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: ioremap reg address: 0x%p\n", scratch_reg);
		denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
		if (denali->dev_info.wTotalBlocks < 512)
			denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
		iounmap(scratch_reg);
	}
}
609
/* determines how many NAND chips are connected to the controller. Note for
   Intel CE4100 devices we don't support more than one device.

   Issues READ ID on each bank in turn; a bank counts as used when its
   first ID byte matches bank 0's.  Probing stops at the first mismatch
   (banks are assumed to be populated contiguously from 0).
 */
static void find_valid_banks(struct denali_nand_info *denali)
{
	uint32_t id[LLD_MAX_FLASH_BANKS];
	int i;

	denali->total_used_banks = 1;
	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
		/* READ ID (0x90) addressed at bank i, then read 1st ID byte */
		index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
		index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
		index_addr_read_data(denali, (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);

		nand_dbg_print(NAND_DBG_DEBUG,
			"Return 1st ID for bank[%d]: %x\n", i, id[i]);

		if (i == 0) {
			if (!(id[i] & 0x0ff))
				break; /* bank 0 returned a zero maker ID -
					* no device responded; stop probing
					* (total_used_banks stays at 1) */
		} else {
			if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
				denali->total_used_banks++;
			else
				break;
		}
	}

	if (denali->platform == INTEL_CE4100) {
		/* Platform limitations of the CE4100 device limit
		 * users to a single chip solution for NAND.
		 * Multichip support is not enabled.
		 */
		if (denali->total_used_banks != 1) {
			printk(KERN_ERR "Sorry, Intel CE4100 only supports "
					"a single NAND device.\n");
			BUG();
		}
	}
	nand_dbg_print(NAND_DBG_DEBUG,
		"denali->total_used_banks: %d\n", denali->total_used_banks);
}
652
653static void detect_partition_feature(struct denali_nand_info *denali)
654{
655 if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
656 if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
657 PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
658 denali->dev_info.wSpectraStartBlock =
659 ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
660 MIN_MAX_BANK_1__MIN_VALUE) *
661 denali->dev_info.wTotalBlocks)
662 +
663 (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
664 MIN_BLK_ADDR_1__VALUE);
665
666 denali->dev_info.wSpectraEndBlock =
667 (((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
668 MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
669 denali->dev_info.wTotalBlocks)
670 +
671 (ioread32(denali->flash_reg + MAX_BLK_ADDR_1) &
672 MAX_BLK_ADDR_1__VALUE);
673
674 denali->dev_info.wTotalBlocks *= denali->total_used_banks;
675
676 if (denali->dev_info.wSpectraEndBlock >=
677 denali->dev_info.wTotalBlocks) {
678 denali->dev_info.wSpectraEndBlock =
679 denali->dev_info.wTotalBlocks - 1;
680 }
681
682 denali->dev_info.wDataBlockNum =
683 denali->dev_info.wSpectraEndBlock -
684 denali->dev_info.wSpectraStartBlock + 1;
685 } else {
686 denali->dev_info.wTotalBlocks *= denali->total_used_banks;
687 denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
688 denali->dev_info.wSpectraEndBlock =
689 denali->dev_info.wTotalBlocks - 1;
690 denali->dev_info.wDataBlockNum =
691 denali->dev_info.wSpectraEndBlock -
692 denali->dev_info.wSpectraStartBlock + 1;
693 }
694 } else {
695 denali->dev_info.wTotalBlocks *= denali->total_used_banks;
696 denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
697 denali->dev_info.wSpectraEndBlock = denali->dev_info.wTotalBlocks - 1;
698 denali->dev_info.wDataBlockNum =
699 denali->dev_info.wSpectraEndBlock -
700 denali->dev_info.wSpectraStartBlock + 1;
701 }
702}
703
/* Dumps every field of denali->dev_info at NAND_DBG_DEBUG level;
 * purely diagnostic, no side effects on driver state. */
static void dump_device_info(struct denali_nand_info *denali)
{
	nand_dbg_print(NAND_DBG_DEBUG, "denali->dev_info:\n");
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
		denali->dev_info.wDeviceMaker);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
		denali->dev_info.wDeviceID);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
		denali->dev_info.wDeviceType);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
		denali->dev_info.wSpectraStartBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
		denali->dev_info.wSpectraEndBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
		denali->dev_info.wTotalBlocks);
	nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
		denali->dev_info.wPagesPerBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
		denali->dev_info.wPageSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
		denali->dev_info.wPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
		denali->dev_info.wPageSpareSize);
	nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
		denali->dev_info.wNumPageSpareFlag);
	nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
		denali->dev_info.wECCBytesPerSector);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
		denali->dev_info.wBlockSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
		denali->dev_info.wBlockDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
		denali->dev_info.wDataBlockNum);
	nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
		denali->dev_info.bPlaneNum);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
		denali->dev_info.wDeviceMainAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
		denali->dev_info.wDeviceSpareAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
		denali->dev_info.wDevicesConnected);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
		denali->dev_info.wDeviceWidth);
	nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
		denali->dev_info.wHWRevision);
	nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
		denali->dev_info.wHWFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
		denali->dev_info.wONFIDevFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
		denali->dev_info.wONFIOptCommands);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
		denali->dev_info.wONFITimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
		denali->dev_info.wONFIPgmCacheTimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
		denali->dev_info.MLCDevice ? "Yes" : "No");
	nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
		denali->dev_info.wSpareSkipBytes);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
		denali->dev_info.nBitsInPageNumber);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
		denali->dev_info.nBitsInPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
		denali->dev_info.nBitsInBlockDataSize);
}
770
/* Identify the attached NAND device and populate denali->dev_info.
 *
 * Reads the manufacturer/device ID registers latched by the controller,
 * then chooses a parameter-discovery path: ONFI probing, a vendor
 * specific helper (Samsung/Toshiba/Hynix), or hardware defaults.
 * Afterwards it snapshots the controller's geometry registers (page,
 * spare and block sizes etc.), derives composite sizes, configures ECC
 * and validates the reported plane count.
 *
 * Returns PASS on success, FAIL if ONFI probing fails or the plane
 * count is not one of the values the driver understands.
 */
static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali)
{
	uint16_t status = PASS;
	uint8_t no_of_planes;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
			__FILE__, __LINE__, __func__);

	/* raw ID/parameter bytes as latched by the controller */
	denali->dev_info.wDeviceMaker = ioread32(denali->flash_reg + MANUFACTURER_ID);
	denali->dev_info.wDeviceID = ioread32(denali->flash_reg + DEVICE_ID);
	denali->dev_info.bDeviceParam0 = ioread32(denali->flash_reg + DEVICE_PARAM_0);
	denali->dev_info.bDeviceParam1 = ioread32(denali->flash_reg + DEVICE_PARAM_1);
	denali->dev_info.bDeviceParam2 = ioread32(denali->flash_reg + DEVICE_PARAM_2);

	/* bits 2-3 of DEVICE_PARAM_0 mark the part as MLC */
	denali->dev_info.MLCDevice = ioread32(denali->flash_reg + DEVICE_PARAM_0) & 0x0c;

	if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (FAIL == get_onfi_nand_para(denali))
			return FAIL;
	} else if (denali->dev_info.wDeviceMaker == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para(denali);
	} else if (denali->dev_info.wDeviceMaker == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para(denali);
	} else if (denali->dev_info.wDeviceMaker == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para(denali);
	} else {
		/* unknown vendor: fall back to the hardware default */
		denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
			"acc_clks: %d, re_2_we: %d, we_2_re: %d,"
			"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
			ioread32(denali->flash_reg + ACC_CLKS),
			ioread32(denali->flash_reg + RE_2_WE),
			ioread32(denali->flash_reg + WE_2_RE),
			ioread32(denali->flash_reg + ADDR_2_DATA),
			ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
			ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
			ioread32(denali->flash_reg + CS_SETUP_CNT));

	denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION);
	denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES);

	denali->dev_info.wDeviceMainAreaSize =
		ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
	denali->dev_info.wDeviceSpareAreaSize =
		ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);

	denali->dev_info.wPageDataSize =
		ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);

	/* Note: When using the Micron 4K NAND device, the controller will report
	 * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
	 * And if force set it to 218 bytes, the controller can not work
	 * correctly. So just let it be. But keep in mind that this bug may
	 * cause
	 * other problems in future. - Yunpeng 2008-10-10
	 */
	denali->dev_info.wPageSpareSize =
		ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);

	denali->dev_info.wPagesPerBlock = ioread32(denali->flash_reg + PAGES_PER_BLOCK);

	/* derive composite sizes from the raw geometry just read */
	denali->dev_info.wPageSize =
		denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize;
	denali->dev_info.wBlockSize =
		denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock;
	denali->dev_info.wBlockDataSize =
		denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize;

	denali->dev_info.wDeviceWidth = ioread32(denali->flash_reg + DEVICE_WIDTH);
	/* non-zero DEVICE_WIDTH means a x16 bus, otherwise x8 */
	denali->dev_info.wDeviceType =
		((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8);

	denali->dev_info.wDevicesConnected = ioread32(denali->flash_reg + DEVICES_CONNECTED);

	/* skip bytes scale with the number of devices wired in parallel */
	denali->dev_info.wSpareSkipBytes =
		ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) *
		denali->dev_info.wDevicesConnected;

	denali->dev_info.nBitsInPageNumber =
		ilog2(denali->dev_info.wPagesPerBlock);
	denali->dev_info.nBitsInPageDataSize =
		ilog2(denali->dev_info.wPageDataSize);
	denali->dev_info.nBitsInBlockDataSize =
		ilog2(denali->dev_info.wBlockDataSize);

	set_ecc_config(denali);

	no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) &
		NUMBER_OF_PLANES__VALUE;

	/* register encodes (planes - 1); only 1/2/4/8 planes are accepted */
	switch (no_of_planes) {
	case 0:
	case 1:
	case 3:
	case 7:
		denali->dev_info.bPlaneNum = no_of_planes + 1;
		break;
	default:
		status = FAIL;
		break;
	}

	find_valid_banks(denali);

	detect_partition_feature(denali);

	dump_device_info(denali);

	/* If the user specified to override the default timings
	 * with a specific ONFI mode, we apply those changes here.
	 */
	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
		NAND_ONFi_Timing_Mode(denali, onfi_timing_mode);

	return status;
}
891
892static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali,
893 uint16_t INT_ENABLE)
894{
895 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
896 __FILE__, __LINE__, __func__);
897
898 if (INT_ENABLE)
899 denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
900 else
901 denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
902}
903
/* Validate a bank index requested by controlling software.
 * The controller exposes exactly four chip-select banks (0..3).
 */
static inline bool is_flash_bank_valid(int flash_bank)
{
	if (flash_bank < 0)
		return false;
	return flash_bank < 4;
}
911
912static void denali_irq_init(struct denali_nand_info *denali)
913{
914 uint32_t int_mask = 0;
915
916 /* Disable global interrupts */
917 NAND_LLD_Enable_Disable_Interrupts(denali, false);
918
919 int_mask = DENALI_IRQ_ALL;
920
921 /* Clear all status bits */
922 denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0);
923 denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1);
924 denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2);
925 denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3);
926
927 denali_irq_enable(denali, int_mask);
928}
929
/* Tear down interrupt handling: mask everything at the controller first,
 * then release the IRQ line registered for this device.
 */
static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
{
	NAND_LLD_Enable_Disable_Interrupts(denali, false);
	free_irq(irqnum, denali);
}
935
/* Program the same interrupt-enable mask into all four banks' enable
 * registers.
 */
static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask)
{
	denali_write32(int_mask, denali->flash_reg + INTR_EN0);
	denali_write32(int_mask, denali->flash_reg + INTR_EN1);
	denali_write32(int_mask, denali->flash_reg + INTR_EN2);
	denali_write32(int_mask, denali->flash_reg + INTR_EN3);
}
943
/* Return the subset of currently pending interrupt status bits that this
 * driver actually services (zero when none of them is asserted).  Used by
 * the ISR to decide whether a shared interrupt belongs to us.
 */
static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
{
	return (read_interrupt_status(denali) & DENALI_IRQ_ALL);
}
951
952/* Interrupts are cleared by writing a 1 to the appropriate status bit */
953static inline void clear_interrupt(struct denali_nand_info *denali, uint32_t irq_mask)
954{
955 uint32_t intr_status_reg = 0;
956
957 intr_status_reg = intr_status_addresses[denali->flash_bank];
958
959 denali_write32(irq_mask, denali->flash_reg + intr_status_reg);
960}
961
/* Reset the driver's cached interrupt status under the irq lock.
 * Note: this clears only the software copy (denali->irq_status); the
 * hardware register is read (and logged in DEBUG_DENALI builds) but not
 * written here -- hardware bits are acknowledged in clear_interrupt().
 */
static void clear_interrupts(struct denali_nand_info *denali)
{
	uint32_t status = 0x0;
	spin_lock_irq(&denali->irq_lock);

	status = read_interrupt_status(denali);

#if DEBUG_DENALI
	/* tag 0x3 in the debug ring marks a clear_interrupts snapshot */
	denali->irq_debug_array[denali->idx++] = 0x30000000 | status;
	denali->idx %= 32;
#endif

	denali->irq_status = 0x0;
	spin_unlock_irq(&denali->irq_lock);
}
977
978static uint32_t read_interrupt_status(struct denali_nand_info *denali)
979{
980 uint32_t intr_status_reg = 0;
981
982 intr_status_reg = intr_status_addresses[denali->flash_bank];
983
984 return ioread32(denali->flash_reg + intr_status_reg);
985}
986
987#if DEBUG_DENALI
/* Dump the 32-entry IRQ debug ring buffer (DEBUG_DENALI builds only).
 * denali->idx is the next slot to be written, i.e. the oldest entry.
 */
static void print_irq_log(struct denali_nand_info *denali)
{
	int i = 0;

	printk("ISR debug log index = %X\n", denali->idx);
	for (i = 0; i < 32; i++)
		printk("%08X: %08X\n", i, denali->irq_debug_array[i]);
}
996#endif
997
Chuanxiao5bac3ac2010-08-05 23:06:04 +0800998/* This is the interrupt service routine. It handles all interrupts
999 * sent to this device. Note that on CE4100, this is a shared
1000 * interrupt.
Jason Robertsce082592010-05-13 15:57:33 +01001001 */
1002static irqreturn_t denali_isr(int irq, void *dev_id)
1003{
1004 struct denali_nand_info *denali = dev_id;
1005 uint32_t irq_status = 0x0;
1006 irqreturn_t result = IRQ_NONE;
1007
1008 spin_lock(&denali->irq_lock);
1009
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001010 /* check to see if a valid NAND chip has
1011 * been selected.
Jason Robertsce082592010-05-13 15:57:33 +01001012 */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001013 if (is_flash_bank_valid(denali->flash_bank)) {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001014 /* check to see if controller generated
Jason Robertsce082592010-05-13 15:57:33 +01001015 * the interrupt, since this is a shared interrupt */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001016 if ((irq_status = denali_irq_detected(denali)) != 0) {
Jason Robertsce082592010-05-13 15:57:33 +01001017#if DEBUG_DENALI
1018 denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status;
1019 denali->idx %= 32;
1020
1021 printk("IRQ status = 0x%04x\n", irq_status);
1022#endif
1023 /* handle interrupt */
1024 /* first acknowledge it */
1025 clear_interrupt(denali, irq_status);
1026 /* store the status in the device context for someone
1027 to read */
1028 denali->irq_status |= irq_status;
1029 /* notify anyone who cares that it happened */
1030 complete(&denali->complete);
1031 /* tell the OS that we've handled this */
1032 result = IRQ_HANDLED;
1033 }
1034 }
1035 spin_unlock(&denali->irq_lock);
1036 return result;
1037}
1038#define BANK(x) ((x) << 24)
1039
/* Block until at least one interrupt in irq_mask has been latched by the
 * ISR, retrying if unrelated interrupts fire, or give up after a full 1s
 * window passes with no completion at all.
 *
 * Returns the latched status bits (the irq_mask bits are consumed from
 * denali->irq_status under the lock), or 0 on timeout.
 */
static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
{
	unsigned long comp_res = 0;
	uint32_t intr_status = 0;
	bool retry = false;
	unsigned long timeout = msecs_to_jiffies(1000);

	do {
#if DEBUG_DENALI
		printk("waiting for 0x%x\n", irq_mask);
#endif
		comp_res = wait_for_completion_timeout(&denali->complete, timeout);
		spin_lock_irq(&denali->irq_lock);
		intr_status = denali->irq_status;

#if DEBUG_DENALI
		denali->irq_debug_array[denali->idx++] = 0x20000000 | (irq_mask << 16) | intr_status;
		denali->idx %= 32;
#endif

		if (intr_status & irq_mask) {
			/* consume only the bits we were waiting on */
			denali->irq_status &= ~irq_mask;
			spin_unlock_irq(&denali->irq_lock);
#if DEBUG_DENALI
			if (retry) printk("status on retry = 0x%x\n", intr_status);
#endif
			/* our interrupt was detected */
			break;
		} else {
			/* these are not the interrupts you are looking for -
			 * need to wait again */
			spin_unlock_irq(&denali->irq_lock);
#if DEBUG_DENALI
			print_irq_log(denali);
			printk("received irq nobody cared: irq_status = 0x%x,"
					" irq_mask = 0x%x, timeout = %ld\n", intr_status, irq_mask, comp_res);
#endif
			retry = true;
		}
	} while (comp_res != 0);

	if (comp_res == 0) {
		/* timeout: report and return 0 so callers can detect it */
		printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
				intr_status, irq_mask);

		intr_status = 0;
	}
	return intr_status;
}
1090
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001091/* This helper function setups the registers for ECC and whether or not
Jason Robertsce082592010-05-13 15:57:33 +01001092 the spare area will be transfered. */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001093static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
Jason Robertsce082592010-05-13 15:57:33 +01001094 bool transfer_spare)
1095{
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001096 int ecc_en_flag = 0, transfer_spare_flag = 0;
Jason Robertsce082592010-05-13 15:57:33 +01001097
1098 /* set ECC, transfer spare bits if needed */
1099 ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
1100 transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
1101
1102 /* Enable spare area/ECC per user's request. */
1103 denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
1104 denali_write32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
1105}
1106
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001107/* sends a pipeline command operation to the controller. See the Denali NAND
1108 controller's user guide for more information (section 4.2.3.6).
Jason Robertsce082592010-05-13 15:57:33 +01001109 */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001110static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en,
1111 bool transfer_spare, int access_type,
Jason Robertsce082592010-05-13 15:57:33 +01001112 int op)
1113{
1114 int status = PASS;
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001115 uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
Jason Robertsce082592010-05-13 15:57:33 +01001116 irq_mask = 0;
1117
1118 if (op == DENALI_READ) irq_mask = INTR_STATUS0__LOAD_COMP;
1119 else if (op == DENALI_WRITE) irq_mask = 0;
1120 else BUG();
1121
1122 setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
1123
1124#if DEBUG_DENALI
1125 spin_lock_irq(&denali->irq_lock);
1126 denali->irq_debug_array[denali->idx++] = 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | (access_type << 4);
1127 denali->idx %= 32;
1128 spin_unlock_irq(&denali->irq_lock);
1129#endif
1130
1131
1132 /* clear interrupts */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001133 clear_interrupts(denali);
Jason Robertsce082592010-05-13 15:57:33 +01001134
1135 addr = BANK(denali->flash_bank) | denali->page;
1136
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001137 if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001138 cmd = MODE_01 | addr;
Jason Robertsce082592010-05-13 15:57:33 +01001139 denali_write32(cmd, denali->flash_mem);
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001140 } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
Jason Robertsce082592010-05-13 15:57:33 +01001141 /* read spare area */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001142 cmd = MODE_10 | addr;
Jason Robertsce082592010-05-13 15:57:33 +01001143 index_addr(denali, (uint32_t)cmd, access_type);
1144
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001145 cmd = MODE_01 | addr;
Jason Robertsce082592010-05-13 15:57:33 +01001146 denali_write32(cmd, denali->flash_mem);
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001147 } else if (op == DENALI_READ) {
Jason Robertsce082592010-05-13 15:57:33 +01001148 /* setup page read request for access type */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001149 cmd = MODE_10 | addr;
Jason Robertsce082592010-05-13 15:57:33 +01001150 index_addr(denali, (uint32_t)cmd, access_type);
1151
1152 /* page 33 of the NAND controller spec indicates we should not
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001153 use the pipeline commands in Spare area only mode. So we
Jason Robertsce082592010-05-13 15:57:33 +01001154 don't.
1155 */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001156 if (access_type == SPARE_ACCESS) {
Jason Robertsce082592010-05-13 15:57:33 +01001157 cmd = MODE_01 | addr;
1158 denali_write32(cmd, denali->flash_mem);
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001159 } else {
Jason Robertsce082592010-05-13 15:57:33 +01001160 index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count);
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001161
1162 /* wait for command to be accepted
Jason Robertsce082592010-05-13 15:57:33 +01001163 * can always use status0 bit as the mask is identical for each
1164 * bank. */
1165 irq_status = wait_for_irq(denali, irq_mask);
1166
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001167 if (irq_status == 0) {
Jason Robertsce082592010-05-13 15:57:33 +01001168 printk(KERN_ERR "cmd, page, addr on timeout "
1169 "(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr);
1170 status = FAIL;
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001171 } else {
Jason Robertsce082592010-05-13 15:57:33 +01001172 cmd = MODE_01 | addr;
1173 denali_write32(cmd, denali->flash_mem);
1174 }
1175 }
1176 }
1177 return status;
1178}
1179
1180/* helper function that simply writes a buffer to the flash */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001181static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_t *buf,
1182 int len)
Jason Robertsce082592010-05-13 15:57:33 +01001183{
1184 uint32_t i = 0, *buf32;
1185
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001186 /* verify that the len is a multiple of 4. see comment in
1187 * read_data_from_flash_mem() */
Jason Robertsce082592010-05-13 15:57:33 +01001188 BUG_ON((len % 4) != 0);
1189
1190 /* write the data to the flash memory */
1191 buf32 = (uint32_t *)buf;
1192 for (i = 0; i < len / 4; i++)
Jason Robertsce082592010-05-13 15:57:33 +01001193 denali_write32(*buf32++, denali->flash_mem + 0x10);
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001194 return i*4; /* intent is to return the number of bytes read */
Jason Robertsce082592010-05-13 15:57:33 +01001195}
1196
1197/* helper function that simply reads a buffer from the flash */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001198static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *buf,
Jason Robertsce082592010-05-13 15:57:33 +01001199 int len)
1200{
1201 uint32_t i = 0, *buf32;
1202
1203 /* we assume that len will be a multiple of 4, if not
1204 * it would be nice to know about it ASAP rather than
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001205 * have random failures...
1206 * This assumption is based on the fact that this
1207 * function is designed to be used to read flash pages,
Jason Robertsce082592010-05-13 15:57:33 +01001208 * which are typically multiples of 4...
1209 */
1210
1211 BUG_ON((len % 4) != 0);
1212
1213 /* transfer the data from the flash */
1214 buf32 = (uint32_t *)buf;
1215 for (i = 0; i < len / 4; i++)
Jason Robertsce082592010-05-13 15:57:33 +01001216 *buf32++ = ioread32(denali->flash_mem + 0x10);
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001217 return i*4; /* intent is to return the number of bytes read */
Jason Robertsce082592010-05-13 15:57:33 +01001218}
1219
/* Write one page's OOB (spare) area to the device.
 * Issues a SPARE_ACCESS pipeline write (no ECC, no spare-with-main
 * transfer), pushes mtd->oobsize bytes into the FIFO, then waits for
 * program-complete/program-fail.  Returns 0 on success, -EIO on timeout
 * or if the pipeline command could not be sent.
 */
static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
						INTR_STATUS0__PROGRAM_FAIL;
	int status = 0;

	denali->page = page;

	if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
							DENALI_WRITE) == PASS) {
		write_data_to_flash_mem(denali, buf, mtd->oobsize);

#if DEBUG_DENALI
		spin_lock_irq(&denali->irq_lock);
		denali->irq_debug_array[denali->idx++] = 0x80000000 | mtd->oobsize;
		denali->idx %= 32;
		spin_unlock_irq(&denali->irq_lock);
#endif


		/* wait for operation to complete */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0) {
			printk(KERN_ERR "OOB write failed\n");
			status = -EIO;
		}
	} else {
		printk(KERN_ERR "unable to send pipeline command\n");
		status = -EIO;
	}
	return status;
}
1256
/* Read one page's OOB (spare) area from the device into buf.
 * Issues a SPARE_ACCESS pipeline read (spare transfer enabled, no ECC),
 * pulls mtd->oobsize bytes from the FIFO and waits for LOAD_COMP, then
 * switches the bank back to MAIN_ACCESS (see the workaround note below).
 */
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, irq_status = 0, addr = 0x0, cmd = 0x0;

	denali->page = page;

#if DEBUG_DENALI
	printk("read_oob %d\n", page);
#endif
	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
							DENALI_READ) == PASS) {
		read_data_from_flash_mem(denali, buf, mtd->oobsize);

		/* wait for command to be accepted
		 * can always use status0 bit as the mask is identical for each
		 * bank. */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0)
			printk(KERN_ERR "page on OOB timeout %d\n", denali->page);

		/* We set the device back to MAIN_ACCESS here as I observed
		 * instability with the controller if you do a block erase
		 * and the last transaction was a SPARE_ACCESS. Block erase
		 * is reliable (according to the MTD test infrastructure)
		 * if you are in MAIN_ACCESS.
		 */
		addr = BANK(denali->flash_bank) | denali->page;
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);

#if DEBUG_DENALI
		spin_lock_irq(&denali->irq_lock);
		denali->irq_debug_array[denali->idx++] = 0x60000000 | mtd->oobsize;
		denali->idx %= 32;
		spin_unlock_irq(&denali->irq_lock);
#endif
	}
}
1298
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001299/* this function examines buffers to see if they contain data that
Jason Robertsce082592010-05-13 15:57:33 +01001300 * indicate that the buffer is part of an erased region of flash.
1301 */
1302bool is_erased(uint8_t *buf, int len)
1303{
1304 int i = 0;
1305 for (i = 0; i < len; i++)
Jason Robertsce082592010-05-13 15:57:33 +01001306 if (buf[i] != 0xFF)
Jason Robertsce082592010-05-13 15:57:33 +01001307 return false;
Jason Robertsce082592010-05-13 15:57:33 +01001308 return true;
1309}
#define ECC_SECTOR_SIZE 512

#define ECC_SECTOR(x)	(((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x)	(((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
/* NOTE(review): masking with ERR_CORRECTION_INFO (a register offset-style
 * name, not a __-suffixed field mask) looks suspect -- presumably the
 * error-type field mask was intended; confirm against denali.h. */
#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO))
/* mask the device-number field first, then shift it down into place;
 * ">>" binds tighter than "&", so the unparenthesized original computed
 * (x) & (MASK >> 8) and never shifted the field. */
#define ECC_ERR_DEVICE(x)	(((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x)		((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
1318
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001319static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
Jason Robertsce082592010-05-13 15:57:33 +01001320 uint8_t *oobbuf, uint32_t irq_status)
1321{
1322 bool check_erased_page = false;
1323
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001324 if (irq_status & INTR_STATUS0__ECC_ERR) {
Jason Robertsce082592010-05-13 15:57:33 +01001325 /* read the ECC errors. we'll ignore them for now */
1326 uint32_t err_address = 0, err_correction_info = 0;
1327 uint32_t err_byte = 0, err_sector = 0, err_device = 0;
1328 uint32_t err_correction_value = 0;
1329
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001330 do {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001331 err_address = ioread32(denali->flash_reg +
Jason Robertsce082592010-05-13 15:57:33 +01001332 ECC_ERROR_ADDRESS);
1333 err_sector = ECC_SECTOR(err_address);
1334 err_byte = ECC_BYTE(err_address);
1335
1336
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001337 err_correction_info = ioread32(denali->flash_reg +
Jason Robertsce082592010-05-13 15:57:33 +01001338 ERR_CORRECTION_INFO);
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001339 err_correction_value =
Jason Robertsce082592010-05-13 15:57:33 +01001340 ECC_CORRECTION_VALUE(err_correction_info);
1341 err_device = ECC_ERR_DEVICE(err_correction_info);
1342
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001343 if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
Jason Robertsce082592010-05-13 15:57:33 +01001344 /* offset in our buffer is computed as:
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001345 sector number * sector size + offset in
Jason Robertsce082592010-05-13 15:57:33 +01001346 sector
1347 */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001348 int offset = err_sector * ECC_SECTOR_SIZE +
Jason Robertsce082592010-05-13 15:57:33 +01001349 err_byte;
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001350 if (offset < denali->mtd.writesize) {
Jason Robertsce082592010-05-13 15:57:33 +01001351 /* correct the ECC error */
1352 buf[offset] ^= err_correction_value;
1353 denali->mtd.ecc_stats.corrected++;
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001354 } else {
Jason Robertsce082592010-05-13 15:57:33 +01001355 /* bummer, couldn't correct the error */
1356 printk(KERN_ERR "ECC offset invalid\n");
1357 denali->mtd.ecc_stats.failed++;
1358 }
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001359 } else {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001360 /* if the error is not correctable, need to
Jason Robertsce082592010-05-13 15:57:33 +01001361 * look at the page to see if it is an erased page.
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001362 * if so, then it's not a real ECC error */
Jason Robertsce082592010-05-13 15:57:33 +01001363 check_erased_page = true;
1364 }
1365
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001366#if DEBUG_DENALI
Jason Robertsce082592010-05-13 15:57:33 +01001367 printk("Detected ECC error in page %d: err_addr = 0x%08x,"
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001368 " info to fix is 0x%08x\n", denali->page, err_address,
Jason Robertsce082592010-05-13 15:57:33 +01001369 err_correction_info);
1370#endif
1371 } while (!ECC_LAST_ERR(err_correction_info));
1372 }
1373 return check_erased_page;
1374}
1375
1376/* programs the controller to either enable/disable DMA transfers */
David Woodhouseaadff492010-05-13 16:12:43 +01001377static void denali_enable_dma(struct denali_nand_info *denali, bool en)
Jason Robertsce082592010-05-13 15:57:33 +01001378{
1379 uint32_t reg_val = 0x0;
1380
1381 if (en) reg_val = DMA_ENABLE__FLAG;
1382
1383 denali_write32(reg_val, denali->flash_reg + DMA_ENABLE);
1384 ioread32(denali->flash_reg + DMA_ENABLE);
1385}
1386
1387/* setups the HW to perform the data DMA */
David Woodhouseaadff492010-05-13 16:12:43 +01001388static void denali_setup_dma(struct denali_nand_info *denali, int op)
Jason Robertsce082592010-05-13 15:57:33 +01001389{
1390 uint32_t mode = 0x0;
1391 const int page_count = 1;
1392 dma_addr_t addr = denali->buf.dma_buf;
1393
1394 mode = MODE_10 | BANK(denali->flash_bank);
1395
1396 /* DMA is a four step process */
1397
1398 /* 1. setup transfer type and # of pages */
1399 index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
1400
1401 /* 2. set memory high address bits 23:8 */
1402 index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200);
1403
1404 /* 3. set memory low address bits 23:8 */
1405 index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300);
1406
1407 /* 4. interrupt when complete, burst len = 64 bytes*/
1408 index_addr(denali, mode | 0x14000, 0x2400);
1409}
1410
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001411/* writes a page. user specifies type, and this function handles the
Jason Robertsce082592010-05-13 15:57:33 +01001412 configuration details. */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001413static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
Jason Robertsce082592010-05-13 15:57:33 +01001414 const uint8_t *buf, bool raw_xfer)
1415{
1416 struct denali_nand_info *denali = mtd_to_denali(mtd);
1417 struct pci_dev *pci_dev = denali->dev;
1418
1419 dma_addr_t addr = denali->buf.dma_buf;
1420 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1421
1422 uint32_t irq_status = 0;
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001423 uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
Jason Robertsce082592010-05-13 15:57:33 +01001424 INTR_STATUS0__PROGRAM_FAIL;
1425
1426 /* if it is a raw xfer, we want to disable ecc, and send
1427 * the spare area.
1428 * !raw_xfer - enable ecc
1429 * raw_xfer - transfer spare
1430 */
1431 setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
1432
1433 /* copy buffer into DMA buffer */
1434 memcpy(denali->buf.buf, buf, mtd->writesize);
1435
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001436 if (raw_xfer) {
Jason Robertsce082592010-05-13 15:57:33 +01001437 /* transfer the data to the spare area */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001438 memcpy(denali->buf.buf + mtd->writesize,
1439 chip->oob_poi,
1440 mtd->oobsize);
Jason Robertsce082592010-05-13 15:57:33 +01001441 }
1442
1443 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);
1444
1445 clear_interrupts(denali);
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001446 denali_enable_dma(denali, true);
Jason Robertsce082592010-05-13 15:57:33 +01001447
David Woodhouseaadff492010-05-13 16:12:43 +01001448 denali_setup_dma(denali, DENALI_WRITE);
Jason Robertsce082592010-05-13 15:57:33 +01001449
1450 /* wait for operation to complete */
1451 irq_status = wait_for_irq(denali, irq_mask);
1452
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001453 if (irq_status == 0) {
Jason Robertsce082592010-05-13 15:57:33 +01001454 printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer);
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001455 denali->status =
1456 (irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL :
1457 PASS;
Jason Robertsce082592010-05-13 15:57:33 +01001458 }
1459
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001460 denali_enable_dma(denali, false);
Jason Robertsce082592010-05-13 15:57:33 +01001461 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
1462}
1463
1464/* NAND core entry points */
1465
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001466/* this is the callback that the NAND core calls to write a page. Since
1467 writing a page with ECC or without is similar, all the work is done
Jason Robertsce082592010-05-13 15:57:33 +01001468 by write_page above. */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001469static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
Jason Robertsce082592010-05-13 15:57:33 +01001470 const uint8_t *buf)
1471{
1472 /* for regular page writes, we let HW handle all the ECC
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001473 * data written to the device. */
Jason Robertsce082592010-05-13 15:57:33 +01001474 write_page(mtd, chip, buf, false);
1475}
1476
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001477/* This is the callback that the NAND core calls to write a page without ECC.
Jason Robertsce082592010-05-13 15:57:33 +01001478 raw access is similiar to ECC page writes, so all the work is done in the
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001479 write_page() function above.
Jason Robertsce082592010-05-13 15:57:33 +01001480 */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001481static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
Jason Robertsce082592010-05-13 15:57:33 +01001482 const uint8_t *buf)
1483{
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001484 /* for raw page writes, we want to disable ECC and simply write
Jason Robertsce082592010-05-13 15:57:33 +01001485 whatever data is in the buffer. */
1486 write_page(mtd, chip, buf, true);
1487}
1488
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001489static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
Jason Robertsce082592010-05-13 15:57:33 +01001490 int page)
1491{
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001492 return write_oob_data(mtd, chip->oob_poi, page);
Jason Robertsce082592010-05-13 15:57:33 +01001493}
1494
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001495static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
Jason Robertsce082592010-05-13 15:57:33 +01001496 int page, int sndcmd)
1497{
1498 read_oob_data(mtd, chip->oob_poi, page);
1499
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001500 return 0; /* notify NAND core to send command to
1501 NAND device. */
Jason Robertsce082592010-05-13 15:57:33 +01001502}
1503
1504static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1505 uint8_t *buf, int page)
1506{
1507 struct denali_nand_info *denali = mtd_to_denali(mtd);
1508 struct pci_dev *pci_dev = denali->dev;
1509
1510 dma_addr_t addr = denali->buf.dma_buf;
1511 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1512
1513 uint32_t irq_status = 0;
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001514 uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
Jason Robertsce082592010-05-13 15:57:33 +01001515 INTR_STATUS0__ECC_ERR;
1516 bool check_erased_page = false;
1517
1518 setup_ecc_for_xfer(denali, true, false);
1519
David Woodhouseaadff492010-05-13 16:12:43 +01001520 denali_enable_dma(denali, true);
Jason Robertsce082592010-05-13 15:57:33 +01001521 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
1522
1523 clear_interrupts(denali);
David Woodhouseaadff492010-05-13 16:12:43 +01001524 denali_setup_dma(denali, DENALI_READ);
Jason Robertsce082592010-05-13 15:57:33 +01001525
1526 /* wait for operation to complete */
1527 irq_status = wait_for_irq(denali, irq_mask);
1528
1529 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
1530
1531 memcpy(buf, denali->buf.buf, mtd->writesize);
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001532
Jason Robertsce082592010-05-13 15:57:33 +01001533 check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status);
David Woodhouseaadff492010-05-13 16:12:43 +01001534 denali_enable_dma(denali, false);
Jason Robertsce082592010-05-13 15:57:33 +01001535
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001536 if (check_erased_page) {
Jason Robertsce082592010-05-13 15:57:33 +01001537 read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
1538
1539 /* check ECC failures that may have occurred on erased pages */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001540 if (check_erased_page) {
Jason Robertsce082592010-05-13 15:57:33 +01001541 if (!is_erased(buf, denali->mtd.writesize))
Jason Robertsce082592010-05-13 15:57:33 +01001542 denali->mtd.ecc_stats.failed++;
Jason Robertsce082592010-05-13 15:57:33 +01001543 if (!is_erased(buf, denali->mtd.oobsize))
Jason Robertsce082592010-05-13 15:57:33 +01001544 denali->mtd.ecc_stats.failed++;
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001545 }
Jason Robertsce082592010-05-13 15:57:33 +01001546 }
1547 return 0;
1548}
1549
1550static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1551 uint8_t *buf, int page)
1552{
1553 struct denali_nand_info *denali = mtd_to_denali(mtd);
1554 struct pci_dev *pci_dev = denali->dev;
1555
1556 dma_addr_t addr = denali->buf.dma_buf;
1557 size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1558
1559 uint32_t irq_status = 0;
1560 uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001561
Jason Robertsce082592010-05-13 15:57:33 +01001562 setup_ecc_for_xfer(denali, false, true);
David Woodhouseaadff492010-05-13 16:12:43 +01001563 denali_enable_dma(denali, true);
Jason Robertsce082592010-05-13 15:57:33 +01001564
1565 pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
1566
1567 clear_interrupts(denali);
David Woodhouseaadff492010-05-13 16:12:43 +01001568 denali_setup_dma(denali, DENALI_READ);
Jason Robertsce082592010-05-13 15:57:33 +01001569
1570 /* wait for operation to complete */
1571 irq_status = wait_for_irq(denali, irq_mask);
1572
1573 pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
1574
David Woodhouseaadff492010-05-13 16:12:43 +01001575 denali_enable_dma(denali, false);
Jason Robertsce082592010-05-13 15:57:33 +01001576
1577 memcpy(buf, denali->buf.buf, mtd->writesize);
1578 memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
1579
1580 return 0;
1581}
1582
1583static uint8_t denali_read_byte(struct mtd_info *mtd)
1584{
1585 struct denali_nand_info *denali = mtd_to_denali(mtd);
1586 uint8_t result = 0xff;
1587
1588 if (denali->buf.head < denali->buf.tail)
Jason Robertsce082592010-05-13 15:57:33 +01001589 result = denali->buf.buf[denali->buf.head++];
Jason Robertsce082592010-05-13 15:57:33 +01001590
1591#if DEBUG_DENALI
1592 printk("read byte -> 0x%02x\n", result);
1593#endif
1594 return result;
1595}
1596
1597static void denali_select_chip(struct mtd_info *mtd, int chip)
1598{
1599 struct denali_nand_info *denali = mtd_to_denali(mtd);
1600#if DEBUG_DENALI
1601 printk("denali select chip %d\n", chip);
1602#endif
1603 spin_lock_irq(&denali->irq_lock);
1604 denali->flash_bank = chip;
1605 spin_unlock_irq(&denali->irq_lock);
1606}
1607
1608static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
1609{
1610 struct denali_nand_info *denali = mtd_to_denali(mtd);
1611 int status = denali->status;
1612 denali->status = 0;
1613
1614#if DEBUG_DENALI
1615 printk("waitfunc %d\n", status);
1616#endif
1617 return status;
1618}
1619
1620static void denali_erase(struct mtd_info *mtd, int page)
1621{
1622 struct denali_nand_info *denali = mtd_to_denali(mtd);
1623
1624 uint32_t cmd = 0x0, irq_status = 0;
1625
1626#if DEBUG_DENALI
1627 printk("erase page: %d\n", page);
1628#endif
1629 /* clear interrupts */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001630 clear_interrupts(denali);
Jason Robertsce082592010-05-13 15:57:33 +01001631
1632 /* setup page read request for access type */
1633 cmd = MODE_10 | BANK(denali->flash_bank) | page;
1634 index_addr(denali, (uint32_t)cmd, 0x1);
1635
1636 /* wait for erase to complete or failure to occur */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001637 irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
Jason Robertsce082592010-05-13 15:57:33 +01001638 INTR_STATUS0__ERASE_FAIL);
1639
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001640 denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? NAND_STATUS_FAIL :
Jason Robertsce082592010-05-13 15:57:33 +01001641 PASS;
1642}
1643
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001644static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
Jason Robertsce082592010-05-13 15:57:33 +01001645 int page)
1646{
1647 struct denali_nand_info *denali = mtd_to_denali(mtd);
1648
1649#if DEBUG_DENALI
1650 printk("cmdfunc: 0x%x %d %d\n", cmd, col, page);
1651#endif
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001652 switch (cmd) {
Jason Robertsce082592010-05-13 15:57:33 +01001653 case NAND_CMD_PAGEPROG:
1654 break;
1655 case NAND_CMD_STATUS:
1656 read_status(denali);
1657 break;
1658 case NAND_CMD_READID:
1659 reset_buf(denali);
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001660 if (denali->flash_bank < denali->total_used_banks) {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001661 /* write manufacturer information into nand
Jason Robertsce082592010-05-13 15:57:33 +01001662 buffer for NAND subsystem to fetch.
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001663 */
1664 write_byte_to_buf(denali, denali->dev_info.wDeviceMaker);
1665 write_byte_to_buf(denali, denali->dev_info.wDeviceID);
1666 write_byte_to_buf(denali, denali->dev_info.bDeviceParam0);
1667 write_byte_to_buf(denali, denali->dev_info.bDeviceParam1);
1668 write_byte_to_buf(denali, denali->dev_info.bDeviceParam2);
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001669 } else {
Jason Robertsce082592010-05-13 15:57:33 +01001670 int i;
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001671 for (i = 0; i < 5; i++)
Jason Robertsce082592010-05-13 15:57:33 +01001672 write_byte_to_buf(denali, 0xff);
1673 }
1674 break;
1675 case NAND_CMD_READ0:
1676 case NAND_CMD_SEQIN:
1677 denali->page = page;
1678 break;
1679 case NAND_CMD_RESET:
1680 reset_bank(denali);
1681 break;
1682 case NAND_CMD_READOOB:
1683 /* TODO: Read OOB data */
1684 break;
1685 default:
1686 printk(KERN_ERR ": unsupported command received 0x%x\n", cmd);
1687 break;
1688 }
1689}
1690
1691/* stubs for ECC functions not used by the NAND core */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001692static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
Jason Robertsce082592010-05-13 15:57:33 +01001693 uint8_t *ecc_code)
1694{
1695 printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n");
1696 BUG();
1697 return -EIO;
1698}
1699
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001700static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
Jason Robertsce082592010-05-13 15:57:33 +01001701 uint8_t *read_ecc, uint8_t *calc_ecc)
1702{
1703 printk(KERN_ERR "denali_ecc_correct called unexpectedly\n");
1704 BUG();
1705 return -EIO;
1706}
1707
1708static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
1709{
1710 printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n");
1711 BUG();
1712}
1713/* end NAND core entry points */
1714
/* Initialization code to bring the device up to a known good state:
 * reset IRQ state and the flash devices, then program a baseline
 * register configuration (ECC enabled, no spare-skip bytes). */
static void denali_hw_init(struct denali_nand_info *denali)
{
	denali_irq_init(denali);
	NAND_Flash_Reset(denali);
	/* 0x0F presumably enables the ready/busy pin for each of the four
	 * banks -- TODO confirm against the controller register spec */
	denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
	denali_write32(CHIP_EN_DONT_CARE__FLAG, denali->flash_reg + CHIP_ENABLE_DONT_CARE);

	/* no spare bytes skipped; marker value written to the spare area */
	denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES);
	denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
	denali_write32(1, denali->flash_reg + ECC_ENABLE);
}
1730
1731/* ECC layout for SLC devices. Denali spec indicates SLC fixed at 4 bytes */
1732#define ECC_BYTES_SLC 4 * (2048 / ECC_SECTOR_SIZE)
1733static struct nand_ecclayout nand_oob_slc = {
1734 .eccbytes = 4,
1735 .eccpos = { 0, 1, 2, 3 }, /* not used */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001736 .oobfree = {
1737 {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001738 .offset = ECC_BYTES_SLC,
1739 .length = 64 - ECC_BYTES_SLC
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001740 }
1741 }
Jason Robertsce082592010-05-13 15:57:33 +01001742};
1743
1744#define ECC_BYTES_MLC 14 * (2048 / ECC_SECTOR_SIZE)
1745static struct nand_ecclayout nand_oob_mlc_14bit = {
1746 .eccbytes = 14,
1747 .eccpos = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001748 .oobfree = {
1749 {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001750 .offset = ECC_BYTES_MLC,
1751 .length = 64 - ECC_BYTES_MLC
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001752 }
1753 }
Jason Robertsce082592010-05-13 15:57:33 +01001754};
1755
/* Signature patterns used to locate the main and mirror bad-block
 * tables in the OOB area of the blocks that store them. */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

/* Main bad-block table descriptor: per-chip table in the last blocks,
 * versioned, created and rewritten by the NAND core as needed. */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 8,		/* byte offset of the pattern within OOB */
	.len = 4,		/* pattern length in bytes */
	.veroffs = 12,		/* byte offset of the version within OOB */
	.maxblocks = 4,		/* search at most this many blocks */
	.pattern = bbt_pattern,
};

/* Mirror copy of the bad-block table; identical layout, distinct pattern. */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs = 8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = mirror_pattern,
};
1778
1779/* initalize driver data structures */
1780void denali_drv_init(struct denali_nand_info *denali)
1781{
1782 denali->idx = 0;
1783
1784 /* setup interrupt handler */
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001785 /* the completion object will be used to notify
Jason Robertsce082592010-05-13 15:57:33 +01001786 * the callee that the interrupt is done */
1787 init_completion(&denali->complete);
1788
1789 /* the spinlock will be used to synchronize the ISR
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001790 * with any element that might be access shared
Jason Robertsce082592010-05-13 15:57:33 +01001791 * data (interrupt status) */
1792 spin_lock_init(&denali->irq_lock);
1793
1794 /* indicate that MTD has not selected a valid bank yet */
1795 denali->flash_bank = CHIP_SELECT_INVALID;
1796
1797 /* initialize our irq_status variable to indicate no interrupts */
1798 denali->irq_status = 0;
1799}
1800
1801/* driver entry point */
1802static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
1803{
1804 int ret = -ENODEV;
1805 resource_size_t csr_base, mem_base;
1806 unsigned long csr_len, mem_len;
1807 struct denali_nand_info *denali;
1808
1809 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1810 __FILE__, __LINE__, __func__);
1811
1812 denali = kzalloc(sizeof(*denali), GFP_KERNEL);
1813 if (!denali)
1814 return -ENOMEM;
1815
1816 ret = pci_enable_device(dev);
1817 if (ret) {
1818 printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
1819 goto failed_enable;
1820 }
1821
1822 if (id->driver_data == INTEL_CE4100) {
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001823 /* Due to a silicon limitation, we can only support
1824 * ONFI timing mode 1 and below.
1825 */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001826 if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
Jason Robertsce082592010-05-13 15:57:33 +01001827 printk("Intel CE4100 only supports ONFI timing mode 1 "
1828 "or below\n");
1829 ret = -EINVAL;
1830 goto failed_enable;
1831 }
1832 denali->platform = INTEL_CE4100;
1833 mem_base = pci_resource_start(dev, 0);
1834 mem_len = pci_resource_len(dev, 1);
1835 csr_base = pci_resource_start(dev, 1);
1836 csr_len = pci_resource_len(dev, 1);
1837 } else {
1838 denali->platform = INTEL_MRST;
1839 csr_base = pci_resource_start(dev, 0);
1840 csr_len = pci_resource_start(dev, 0);
1841 mem_base = pci_resource_start(dev, 1);
1842 mem_len = pci_resource_len(dev, 1);
1843 if (!mem_len) {
1844 mem_base = csr_base + csr_len;
1845 mem_len = csr_len;
1846 nand_dbg_print(NAND_DBG_WARN,
1847 "Spectra: No second BAR for PCI device; assuming %08Lx\n",
1848 (uint64_t)csr_base);
1849 }
1850 }
1851
1852 /* Is 32-bit DMA supported? */
1853 ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
1854
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001855 if (ret) {
Jason Robertsce082592010-05-13 15:57:33 +01001856 printk(KERN_ERR "Spectra: no usable DMA configuration\n");
1857 goto failed_enable;
1858 }
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001859 denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE,
Jason Robertsce082592010-05-13 15:57:33 +01001860 PCI_DMA_BIDIRECTIONAL);
1861
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001862 if (pci_dma_mapping_error(dev, denali->buf.dma_buf)) {
Jason Robertsce082592010-05-13 15:57:33 +01001863 printk(KERN_ERR "Spectra: failed to map DMA buffer\n");
1864 goto failed_enable;
1865 }
1866
1867 pci_set_master(dev);
1868 denali->dev = dev;
1869
1870 ret = pci_request_regions(dev, DENALI_NAND_NAME);
1871 if (ret) {
1872 printk(KERN_ERR "Spectra: Unable to request memory regions\n");
1873 goto failed_req_csr;
1874 }
1875
1876 denali->flash_reg = ioremap_nocache(csr_base, csr_len);
1877 if (!denali->flash_reg) {
1878 printk(KERN_ERR "Spectra: Unable to remap memory region\n");
1879 ret = -ENOMEM;
1880 goto failed_remap_csr;
1881 }
1882 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n",
1883 (uint64_t)csr_base, denali->flash_reg, csr_len);
1884
1885 denali->flash_mem = ioremap_nocache(mem_base, mem_len);
1886 if (!denali->flash_mem) {
1887 printk(KERN_ERR "Spectra: ioremap_nocache failed!");
1888 iounmap(denali->flash_reg);
1889 ret = -ENOMEM;
1890 goto failed_remap_csr;
1891 }
1892
1893 nand_dbg_print(NAND_DBG_WARN,
1894 "Spectra: Remapped flash base address: "
1895 "0x%p, len: %ld\n",
1896 denali->flash_mem, csr_len);
1897
1898 denali_hw_init(denali);
1899 denali_drv_init(denali);
1900
1901 nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
1902 if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
1903 DENALI_NAND_NAME, denali)) {
1904 printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
1905 ret = -ENODEV;
1906 goto failed_request_irq;
1907 }
1908
1909 /* now that our ISR is registered, we can enable interrupts */
1910 NAND_LLD_Enable_Disable_Interrupts(denali, true);
1911
1912 pci_set_drvdata(dev, denali);
1913
1914 NAND_Read_Device_ID(denali);
1915
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001916 /* MTD supported page sizes vary by kernel. We validate our
1917 * kernel supports the device here.
Jason Robertsce082592010-05-13 15:57:33 +01001918 */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001919 if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
Jason Robertsce082592010-05-13 15:57:33 +01001920 ret = -ENODEV;
1921 printk(KERN_ERR "Spectra: device size not supported by this "
1922 "version of MTD.");
1923 goto failed_nand;
1924 }
1925
1926 nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
1927 "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
1928 "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
1929 "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
1930 ioread32(denali->flash_reg + ACC_CLKS),
1931 ioread32(denali->flash_reg + RE_2_WE),
1932 ioread32(denali->flash_reg + WE_2_RE),
1933 ioread32(denali->flash_reg + ADDR_2_DATA),
1934 ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
1935 ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
1936 ioread32(denali->flash_reg + CS_SETUP_CNT));
1937
1938 denali->mtd.name = "Denali NAND";
1939 denali->mtd.owner = THIS_MODULE;
1940 denali->mtd.priv = &denali->nand;
1941
1942 /* register the driver with the NAND core subsystem */
1943 denali->nand.select_chip = denali_select_chip;
1944 denali->nand.cmdfunc = denali_cmdfunc;
1945 denali->nand.read_byte = denali_read_byte;
1946 denali->nand.waitfunc = denali_waitfunc;
1947
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001948 /* scan for NAND devices attached to the controller
Jason Robertsce082592010-05-13 15:57:33 +01001949 * this is the first stage in a two step process to register
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001950 * with the nand subsystem */
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001951 if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL)) {
Jason Robertsce082592010-05-13 15:57:33 +01001952 ret = -ENXIO;
1953 goto failed_nand;
1954 }
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001955
1956 /* second stage of the NAND scan
1957 * this stage requires information regarding ECC and
1958 * bad block management. */
Jason Robertsce082592010-05-13 15:57:33 +01001959
1960 /* Bad block management */
1961 denali->nand.bbt_td = &bbt_main_descr;
1962 denali->nand.bbt_md = &bbt_mirror_descr;
1963
1964 /* skip the scan for now until we have OOB read and write support */
1965 denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
1966 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
1967
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001968 if (denali->dev_info.MLCDevice) {
Jason Robertsce082592010-05-13 15:57:33 +01001969 denali->nand.ecc.layout = &nand_oob_mlc_14bit;
1970 denali->nand.ecc.bytes = ECC_BYTES_MLC;
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001971 } else {/* SLC */
Jason Robertsce082592010-05-13 15:57:33 +01001972 denali->nand.ecc.layout = &nand_oob_slc;
1973 denali->nand.ecc.bytes = ECC_BYTES_SLC;
1974 }
1975
Chuanxiao5bac3ac2010-08-05 23:06:04 +08001976 /* These functions are required by the NAND core framework, otherwise,
1977 * the NAND core will assert. However, we don't need them, so we'll stub
1978 * them out. */
Jason Robertsce082592010-05-13 15:57:33 +01001979 denali->nand.ecc.calculate = denali_ecc_calculate;
1980 denali->nand.ecc.correct = denali_ecc_correct;
1981 denali->nand.ecc.hwctl = denali_ecc_hwctl;
1982
1983 /* override the default read operations */
1984 denali->nand.ecc.size = denali->mtd.writesize;
1985 denali->nand.ecc.read_page = denali_read_page;
1986 denali->nand.ecc.read_page_raw = denali_read_page_raw;
1987 denali->nand.ecc.write_page = denali_write_page;
1988 denali->nand.ecc.write_page_raw = denali_write_page_raw;
1989 denali->nand.ecc.read_oob = denali_read_oob;
1990 denali->nand.ecc.write_oob = denali_write_oob;
1991 denali->nand.erase_cmd = denali_erase;
1992
Chuanxiao Dong345b1d32010-07-27 10:41:53 +08001993 if (nand_scan_tail(&denali->mtd)) {
Jason Robertsce082592010-05-13 15:57:33 +01001994 ret = -ENXIO;
1995 goto failed_nand;
1996 }
1997
1998 ret = add_mtd_device(&denali->mtd);
1999 if (ret) {
2000 printk(KERN_ERR "Spectra: Failed to register MTD device: %d\n", ret);
2001 goto failed_nand;
2002 }
2003 return 0;
2004
2005 failed_nand:
2006 denali_irq_cleanup(dev->irq, denali);
2007 failed_request_irq:
2008 iounmap(denali->flash_reg);
2009 iounmap(denali->flash_mem);
2010 failed_remap_csr:
2011 pci_release_regions(dev);
2012 failed_req_csr:
Chuanxiao5bac3ac2010-08-05 23:06:04 +08002013 pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
Jason Robertsce082592010-05-13 15:57:33 +01002014 PCI_DMA_BIDIRECTIONAL);
2015 failed_enable:
2016 kfree(denali);
2017 return ret;
2018}
2019
2020/* driver exit point */
2021static void denali_pci_remove(struct pci_dev *dev)
2022{
2023 struct denali_nand_info *denali = pci_get_drvdata(dev);
2024
2025 nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
2026 __FILE__, __LINE__, __func__);
2027
2028 nand_release(&denali->mtd);
2029 del_mtd_device(&denali->mtd);
2030
2031 denali_irq_cleanup(dev->irq, denali);
2032
2033 iounmap(denali->flash_reg);
2034 iounmap(denali->flash_mem);
2035 pci_release_regions(dev);
2036 pci_disable_device(dev);
Chuanxiao5bac3ac2010-08-05 23:06:04 +08002037 pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
Jason Robertsce082592010-05-13 15:57:33 +01002038 PCI_DMA_BIDIRECTIONAL);
2039 pci_set_drvdata(dev, NULL);
2040 kfree(denali);
2041}
2042
/* Export the PCI ID table (denali_pci_ids, defined elsewhere in this
 * file) so module autoloading can match this driver to its devices. */
MODULE_DEVICE_TABLE(pci, denali_pci_ids);

/* PCI driver glue binding probe/remove to the matched devices. */
static struct pci_driver denali_pci_driver = {
	.name = DENALI_NAND_NAME,
	.id_table = denali_pci_ids,
	.probe = denali_pci_probe,
	.remove = denali_pci_remove,
};
2051
2052static int __devinit denali_init(void)
2053{
2054 printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
2055 return pci_register_driver(&denali_pci_driver);
2056}
2057
2058/* Free memory */
2059static void __devexit denali_exit(void)
2060{
2061 pci_unregister_driver(&denali_pci_driver);
2062}
2063
2064module_init(denali_init);
2065module_exit(denali_exit);