/*
 * SuperH FLCTL nand controller
 *
 * Copyright (c) 2008 Renesas Solutions Corp.
 * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
 *
 * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>

static struct nand_ecclayout flctl_4secc_oob_16 = {
	.eccbytes = 10,
	.eccpos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
	.oobfree = {
		{.offset = 12,
		.length = 4} },
};

static struct nand_ecclayout flctl_4secc_oob_64 = {
	.eccbytes = 4 * 10,
	.eccpos = {
		 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
		22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
		38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
		54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
	.oobfree = {
		{.offset = 2, .length = 4},
		{.offset = 16, .length = 6},
		{.offset = 32, .length = 6},
		{.offset = 48, .length = 6} },
};

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

static struct nand_bbt_descr flctl_4secc_smallpage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 11,
	.len = 1,
	.pattern = scan_ff_pattern,
};

static struct nand_bbt_descr flctl_4secc_largepage = {
	.options = NAND_BBT_SCAN2NDPAGE,
	.offs = 0,
	.len = 2,
	.pattern = scan_ff_pattern,
};

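/* Clear the AC0/AC1 data FIFOs by pulsing the clear bits in FLINTDMACR */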
static void empty_fifo(struct sh_flctl *flctl)
{
	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
}

static void start_translation(struct sh_flctl *flctl)
{
	writeb(TRSTRT, FLTRCR(flctl));
}

static void timeout_error(struct sh_flctl *flctl, const char *str)
{
	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
}

static void wait_completion(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		if (readb(FLTRCR(flctl)) & TREND) {
			writeb(0x0, FLTRCR(flctl));
			return;
		}
		udelay(1);
	}

	timeout_error(flctl, __func__);
	writeb(0x0, FLTRCR(flctl));
}

static void flctl_dma_complete(void *param)
{
	struct sh_flctl *flctl = param;

	complete(&flctl->dma_complete);
}

static void flctl_release_dma(struct sh_flctl *flctl)
{
	if (flctl->chan_fifo0_rx) {
		dma_release_channel(flctl->chan_fifo0_rx);
		flctl->chan_fifo0_rx = NULL;
	}
	if (flctl->chan_fifo0_tx) {
		dma_release_channel(flctl->chan_fifo0_tx);
		flctl->chan_fifo0_tx = NULL;
	}
}

static void flctl_setup_dma(struct sh_flctl *flctl)
{
	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct platform_device *pdev = flctl->pdev;
	struct sh_flctl_platform_data *pdata = pdev->dev.platform_data;
	int ret;

	if (!pdata)
		return;

	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
		return;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
				(void *)pdata->slave_id_fifo0_tx);
	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
		flctl->chan_fifo0_tx);

	if (!flctl->chan_fifo0_tx)
		return;

	memset(&cfg, 0, sizeof(cfg));
	cfg.slave_id = pdata->slave_id_fifo0_tx;
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
	if (ret < 0)
		goto err;

	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
				(void *)pdata->slave_id_fifo0_rx);
	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
		flctl->chan_fifo0_rx);

	if (!flctl->chan_fifo0_rx)
		goto err;

	cfg.slave_id = pdata->slave_id_fifo0_rx;
	cfg.direction = DMA_DEV_TO_MEM;
	cfg.dst_addr = 0;
	cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
	if (ret < 0)
		goto err;

	init_completion(&flctl->dma_complete);

	return;

err:
	flctl_release_dma(flctl);
}

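/*
 * Load the column/page address into FLADR (and FLADR2 for devices larger
 * than 128MB). The packing differs between 512 byte and 2048 byte page
 * chips, and the column is halved on 16-bit bus devices.
 */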
static void set_addr(struct mtd_info *mtd, int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t addr = 0;

	if (column == -1) {
		addr = page_addr;	/* ERASE1 */
	} else if (page_addr != -1) {
		/* SEQIN, READ0, etc.. */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		if (flctl->page_size) {
			addr = column & 0x0FFF;
			addr |= (page_addr & 0xff) << 16;
			addr |= ((page_addr >> 8) & 0xff) << 24;
			/* larger than 128MB */
			if (flctl->rw_ADRCNT == ADRCNT2_E) {
				uint32_t addr2;
				addr2 = (page_addr >> 16) & 0xff;
				writel(addr2, FLADR2(flctl));
			}
		} else {
			addr = column;
			addr |= (page_addr & 0xff) << 8;
			addr |= ((page_addr >> 8) & 0xff) << 16;
			addr |= ((page_addr >> 16) & 0xff) << 24;
		}
	}
	writel(addr, FLADR(flctl));
}

static void wait_rfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		uint32_t val;
		/* check FIFO */
		val = readl(FLDTCNTR(flctl)) >> 16;
		if (val & 0xFF)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static void wait_wfifo_ready(struct sh_flctl *flctl)
{
	uint32_t len, timeout = LOOP_TIMEOUT_MAX;

	while (timeout--) {
		/* check FIFO */
		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

static enum flctl_ecc_res_t wait_recfifo_ready
		(struct sh_flctl *flctl, int sector_number)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	void __iomem *ecc_reg[4];
	int i;
	int state = FL_SUCCESS;
	uint32_t data, size;

	/*
	 * First this loop checks in FLDTCNTR if we are ready to read out the
	 * oob data. This is the case if either all went fine without errors or
	 * if the bottom part of the loop corrected the errors or marked them as
	 * uncorrectable and the controller is given time to push the data into
	 * the FIFO.
	 */
	while (timeout--) {
		/* check if all is ok and we can read out the OOB */
		size = readl(FLDTCNTR(flctl)) >> 24;
		if ((size & 0xFF) == 4)
			return state;

		/* check if a correction code has been calculated */
		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
			/*
			 * either we wait for the fifo to be filled or a
			 * correction pattern is being generated
			 */
			udelay(1);
			continue;
		}

		/* check for an uncorrectable error */
		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
			/* check if we face a non-empty page */
			for (i = 0; i < 512; i++) {
				if (flctl->done_buff[i] != 0xff) {
					state = FL_ERROR; /* can't correct */
					break;
				}
			}

			if (state == FL_SUCCESS)
				dev_dbg(&flctl->pdev->dev,
				"reading empty sector %d, ecc error ignored\n",
				sector_number);

			writel(0, FL4ECCCR(flctl));
			continue;
		}

		/* start error correction */
		ecc_reg[0] = FL4ECCRESULT0(flctl);
		ecc_reg[1] = FL4ECCRESULT1(flctl);
		ecc_reg[2] = FL4ECCRESULT2(flctl);
		ecc_reg[3] = FL4ECCRESULT3(flctl);

		for (i = 0; i < 3; i++) {
			uint8_t org;
			unsigned int index;

			data = readl(ecc_reg[i]);

			if (flctl->page_size)
				index = (512 * sector_number) +
					(data >> 16);
			else
				index = data >> 16;

			org = flctl->done_buff[index];
			flctl->done_buff[index] = org ^ (data & 0xFF);
		}
		state = FL_REPAIRABLE;
		writel(0, FL4ECCCR(flctl));
	}

	timeout_error(flctl, __func__);
	return FL_TIMEOUT;	/* timeout */
}

static void wait_wecfifo_ready(struct sh_flctl *flctl)
{
	uint32_t timeout = LOOP_TIMEOUT_MAX;
	uint32_t len;

	while (timeout--) {
		/* check FLECFIFO */
		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
		if (len >= 4)
			return;
		udelay(1);
	}
	timeout_error(flctl, __func__);
}

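/*
 * Move one buffer through FIFO0 with the shdma DMA engine. Returns the
 * remaining jiffies from wait_for_completion_timeout() on success, 0 on
 * timeout, or a negative error code if no descriptor could be prepared;
 * callers treat any value <= 0 as a cue to fall back to PIO.
 */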
static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
					int len, enum dma_data_direction dir)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan;
	enum dma_transfer_direction tr_dir;
	dma_addr_t dma_addr;
	dma_cookie_t cookie = -EINVAL;
	uint32_t reg;
	int ret;

	if (dir == DMA_FROM_DEVICE) {
		chan = flctl->chan_fifo0_rx;
		tr_dir = DMA_DEV_TO_MEM;
	} else {
		chan = flctl->chan_fifo0_tx;
		tr_dir = DMA_MEM_TO_DEV;
	}

	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);

	if (dma_addr)
		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (desc) {
		reg = readl(FLINTDMACR(flctl));
		reg |= DREQ0EN;
		writel(reg, FLINTDMACR(flctl));

		desc->callback = flctl_dma_complete;
		desc->callback_param = flctl;
		cookie = dmaengine_submit(desc);

		dma_async_issue_pending(chan);
	} else {
		/* DMA failed, fall back to PIO */
		flctl_release_dma(flctl);
		dev_warn(&flctl->pdev->dev,
			 "DMA failed, falling back to PIO\n");
		ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&flctl->dma_complete,
					msecs_to_jiffies(3000));

	if (ret <= 0) {
		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
		dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
	}

out:
	reg = readl(FLINTDMACR(flctl));
	reg &= ~DREQ0EN;
	writel(reg, FLINTDMACR(flctl));

	dma_unmap_single(chan->device->dev, dma_addr, len, dir);

	/* ret > 0 is success */
	return ret;
}

static void read_datareg(struct sh_flctl *flctl, int offset)
{
	unsigned long data;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	wait_completion(flctl);

	data = readl(FLDATAR(flctl));
	*buf = le32_to_cpu(data);
}

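/*
 * Read rlen bytes from the data FIFO into done_buff at the given offset.
 * Transfers of 32 bytes or more go through DMA when a channel is set up;
 * otherwise the FIFO is drained with polled 32-bit reads.
 */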
static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_rx && rlen >= 32 &&
		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
			goto convert;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_rfifo_ready(flctl);
		buf[i] = readl(FLDTFIFO(flctl));
	}

convert:
	for (i = 0; i < len_4align; i++)
		buf[i] = be32_to_cpu(buf[i]);
}

static enum flctl_ecc_res_t read_ecfiforeg
		(struct sh_flctl *flctl, uint8_t *buff, int sector)
{
	int i;
	enum flctl_ecc_res_t res;
	unsigned long *ecc_buf = (unsigned long *)buff;

	res = wait_recfifo_ready(flctl, sector);

	if (res != FL_ERROR) {
		for (i = 0; i < 4; i++) {
			ecc_buf[i] = readl(FLECFIFO(flctl));
			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
		}
	}

	return res;
}

static void write_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;
	for (i = 0; i < len_4align; i++) {
		wait_wfifo_ready(flctl);
		writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
	}
}

static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
						unsigned int offset)
{
	int i, len_4align;
	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];

	len_4align = (rlen + 3) / 4;

	for (i = 0; i < len_4align; i++)
		buf[i] = cpu_to_be32(buf[i]);

	/* initiate DMA transfer */
	if (flctl->chan_fifo0_tx && rlen >= 32 &&
		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
			return;	/* DMA success */

	/* do polling transfer */
	for (i = 0; i < len_4align; i++) {
		wait_wecfifo_ready(flctl);
		writel(buf[i], FLECFIFO(flctl));
	}
}

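/*
 * Program FLCMNCR/FLCMDCR/FLCMCDR for the given NAND command: select
 * sector-access and 16-bit modes where appropriate and set the number of
 * address bytes the controller should emit.
 */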
static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
	uint32_t flcmdcr_val, addr_len_bytes = 0;

	/* Set SNAND bit if page size is 2048 bytes */
	if (flctl->page_size)
		flcmncr_val |= SNAND_E;
	else
		flcmncr_val &= ~SNAND_E;

	/* default FLCMDCR val */
	flcmdcr_val = DOCMD1_E | DOADR_E;

	/* Set for FLCMDCR */
	switch (cmd) {
	case NAND_CMD_ERASE1:
		addr_len_bytes = flctl->erase_ADRCNT;
		flcmdcr_val |= DOCMD2_E;
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_RNDOUT:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= CDSRC_E;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_SEQIN:
		/* This case means that cmd is READ0 or READ1 or READ00 */
		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
		break;
	case NAND_CMD_PAGEPROG:
		addr_len_bytes = flctl->rw_ADRCNT;
		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			flcmncr_val |= SEL_16BIT;
		break;
	case NAND_CMD_READID:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val |= CDSRC_E;
		addr_len_bytes = ADRCNT_1;
		break;
	case NAND_CMD_STATUS:
	case NAND_CMD_RESET:
		flcmncr_val &= ~SNAND_E;
		flcmdcr_val &= ~(DOADR_E | DOSR_E);
		break;
	default:
		break;
	}

	/* Set address bytes parameter */
	flcmdcr_val |= addr_len_bytes;

	/* Now actually write */
	writel(flcmncr_val, FLCMNCR(flctl));
	writel(flcmdcr_val, FLCMDCR(flctl));
	writel(flcmcdr_val, FLCMCDR(flctl));
}

static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	chip->read_buf(mtd, buf, mtd->writesize);
	if (oob_required)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}

static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
				   const uint8_t *buf, int oob_required)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}

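/*
 * Read a whole page (data plus ECC area) in hardware-ECC sector access
 * mode, applying correctable repairs in place and updating the MTD ECC
 * statistics for corrected and failed sectors.
 */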
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int sector, page_sectors;
	enum flctl_ecc_res_t ecc_result;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
		 FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));

	empty_fifo(flctl);
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		read_fiforeg(flctl, 512, 512 * sector);

		ecc_result = read_ecfiforeg(flctl,
			&flctl->done_buff[mtd->writesize + 16 * sector],
			sector);

		switch (ecc_result) {
		case FL_REPAIRABLE:
			dev_info(&flctl->pdev->dev,
				"applied ecc on page 0x%x\n", page_addr);
			flctl->mtd.ecc_stats.corrected++;
			break;
		case FL_ERROR:
			dev_warn(&flctl->pdev->dev,
				"page 0x%x contains corrupted data\n",
				page_addr);
			flctl->mtd.ecc_stats.failed++;
			break;
		default:
			;
		}
	}

	wait_completion(flctl);

	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
			FLCMNCR(flctl));
}

static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_sectors = flctl->page_size ? 4 : 1;
	int i;

	set_cmd_regs(mtd, NAND_CMD_READ0,
		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);

	empty_fifo(flctl);

	for (i = 0; i < page_sectors; i++) {
		set_addr(mtd, (512 + 16) * i + 512, page_addr);
		writel(16, FLDTCNTR(flctl));

		start_translation(flctl);
		read_fiforeg(flctl, 16, 16 * i);
		wait_completion(flctl);
	}
}

static void execmd_write_page_sector(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	empty_fifo(flctl);
	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
	writel(page_addr << 2, FLADR(flctl));
	start_translation(flctl);

	for (sector = 0; sector < page_sectors; sector++) {
		write_fiforeg(flctl, 512, 512 * sector);
		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
	}

	wait_completion(flctl);
	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
}

static void execmd_write_oob(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int page_addr = flctl->seqin_page_addr;
	int sector, page_sectors;

	page_sectors = flctl->page_size ? 4 : 1;

	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);

	for (sector = 0; sector < page_sectors; sector++) {
		empty_fifo(flctl);
		set_addr(mtd, sector * 528 + 512, page_addr);
		writel(16, FLDTCNTR(flctl));	/* set read size */

		start_translation(flctl);
		write_fiforeg(flctl, 16, 16 * sector);
		wait_completion(flctl);
	}
}

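/*
 * Command dispatcher hooked into nand_chip->cmdfunc. NAND commands are
 * translated into FLCTL register sequences; read data is staged in
 * done_buff and handed out later through the read_byte/read_buf hooks.
 */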
static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
			int column, int page_addr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint32_t read_cmd = 0;

	pm_runtime_get_sync(&flctl->pdev->dev);

	flctl->read_bytes = 0;
	if (command != NAND_CMD_PAGEPROG)
		flctl->index = 0;

	switch (command) {
	case NAND_CMD_READ1:
	case NAND_CMD_READ0:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_page_sector(mtd, page_addr);
			break;
		}
		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, 0, page_addr);

		flctl->read_bytes = mtd->writesize + mtd->oobsize;
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column >>= 1;
		flctl->index += column;
		goto read_normal_exit;

	case NAND_CMD_READOOB:
		if (flctl->hwecc) {
			/* read page with hwecc */
			execmd_read_oob(mtd, page_addr);
			break;
		}

		if (flctl->page_size) {
			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
				| NAND_CMD_READ0);
			set_addr(mtd, mtd->writesize, page_addr);
		} else {
			set_cmd_regs(mtd, command, command);
			set_addr(mtd, 0, page_addr);
		}
		flctl->read_bytes = mtd->oobsize;
		goto read_normal_exit;

	case NAND_CMD_RNDOUT:
		if (flctl->hwecc)
			break;

		if (flctl->page_size)
			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
				| command);
		else
			set_cmd_regs(mtd, command, command);

		set_addr(mtd, column, 0);

		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
		goto read_normal_exit;

	case NAND_CMD_READID:
		set_cmd_regs(mtd, command, command);

		/* READID is always performed using an 8-bit bus */
		if (flctl->chip.options & NAND_BUSWIDTH_16)
			column <<= 1;
		set_addr(mtd, column, 0);

		flctl->read_bytes = 8;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		empty_fifo(flctl);
		start_translation(flctl);
		read_fiforeg(flctl, flctl->read_bytes, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_ERASE1:
		flctl->erase1_page_addr = page_addr;
		break;

	case NAND_CMD_ERASE2:
		set_cmd_regs(mtd, NAND_CMD_ERASE1,
			(command << 8) | NAND_CMD_ERASE1);
		set_addr(mtd, -1, flctl->erase1_page_addr);
		start_translation(flctl);
		wait_completion(flctl);
		break;

	case NAND_CMD_SEQIN:
		if (!flctl->page_size) {
			/* output read command */
			if (column >= mtd->writesize) {
				column -= mtd->writesize;
				read_cmd = NAND_CMD_READOOB;
			} else if (column < 256) {
				read_cmd = NAND_CMD_READ0;
			} else {
				column -= 256;
				read_cmd = NAND_CMD_READ1;
			}
		}
		flctl->seqin_column = column;
		flctl->seqin_page_addr = page_addr;
		flctl->seqin_read_cmd = read_cmd;
		break;

	case NAND_CMD_PAGEPROG:
		empty_fifo(flctl);
		if (!flctl->page_size) {
			set_cmd_regs(mtd, NAND_CMD_SEQIN,
					flctl->seqin_read_cmd);
			set_addr(mtd, -1, -1);
			writel(0, FLDTCNTR(flctl));	/* set 0 size */
			start_translation(flctl);
			wait_completion(flctl);
		}
		if (flctl->hwecc) {
			/* write page with hwecc */
			if (flctl->seqin_column == mtd->writesize)
				execmd_write_oob(mtd);
			else if (!flctl->seqin_column)
				execmd_write_page_sector(mtd);
			else
				printk(KERN_ERR "Invalid address !?\n");
			break;
		}
		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
		start_translation(flctl);
		write_fiforeg(flctl, flctl->index, 0);
		wait_completion(flctl);
		break;

	case NAND_CMD_STATUS:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		flctl->read_bytes = 1;
		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
		start_translation(flctl);
		read_datareg(flctl, 0); /* read and end */
		break;

	case NAND_CMD_RESET:
		set_cmd_regs(mtd, command, command);
		set_addr(mtd, -1, -1);

		writel(0, FLDTCNTR(flctl)); /* set 0 size */
		start_translation(flctl);
		wait_completion(flctl);
		break;

	default:
		break;
	}
	goto runtime_exit;

read_normal_exit:
	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
	empty_fifo(flctl);
	start_translation(flctl);
	read_fiforeg(flctl, flctl->read_bytes, 0);
	wait_completion(flctl);
runtime_exit:
	pm_runtime_put_sync(&flctl->pdev->dev);
	return;
}

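/*
 * Chip-select hook: toggle the CE0 enable bit, hold a device PM QoS
 * request while the chip is selected and, when requested by platform
 * data, assert the HOLDEN signal.
 */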
static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	int ret;

	switch (chipnr) {
	case -1:
		flctl->flcmncr_base &= ~CE0_ENABLE;

		pm_runtime_get_sync(&flctl->pdev->dev);
		writel(flctl->flcmncr_base, FLCMNCR(flctl));

		if (flctl->qos_request) {
			dev_pm_qos_remove_request(&flctl->pm_qos);
			flctl->qos_request = 0;
		}

		pm_runtime_put_sync(&flctl->pdev->dev);
		break;
	case 0:
		flctl->flcmncr_base |= CE0_ENABLE;

		if (!flctl->qos_request) {
			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
							&flctl->pm_qos, 100);
			if (ret < 0)
				dev_err(&flctl->pdev->dev,
					"PM QoS request failed: %d\n", ret);
			flctl->qos_request = 1;
		}

		if (flctl->holden) {
			pm_runtime_get_sync(&flctl->pdev->dev);
			writel(HOLDEN, FLHOLDCR(flctl));
			pm_runtime_put_sync(&flctl->pdev->dev);
		}
		break;
	default:
		BUG();
	}
}

static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	memcpy(&flctl->done_buff[flctl->index], buf, len);
	flctl->index += len;
}

static uint8_t flctl_read_byte(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint8_t data;

	data = flctl->done_buff[flctl->index];
	flctl->index++;
	return data;
}

static uint16_t flctl_read_word(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];

	flctl->index += 2;
	return *buf;
}

static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);

	memcpy(buf, &flctl->done_buff[flctl->index], len);
	flctl->index += len;
}

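/*
 * Finish controller setup once nand_scan_ident() has identified the chip:
 * derive the address-cycle counts from page and chip size and, when
 * hardware ECC is used, install the 4-symbol ECC layout and callbacks.
 */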
static int flctl_chip_init_tail(struct mtd_info *mtd)
{
	struct sh_flctl *flctl = mtd_to_flctl(mtd);
	struct nand_chip *chip = &flctl->chip;

	if (mtd->writesize == 512) {
		flctl->page_size = 0;
		if (chip->chipsize > (32 << 20)) {
			/* larger than 32MB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (2 << 16)) {
			/* larger than 128KB */
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_2;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	} else {
		flctl->page_size = 1;
		if (chip->chipsize > (128 << 20)) {
			/* larger than 128MB */
			flctl->rw_ADRCNT = ADRCNT2_E;
			flctl->erase_ADRCNT = ADRCNT_3;
		} else if (chip->chipsize > (8 << 16)) {
			/* larger than 512KB */
			flctl->rw_ADRCNT = ADRCNT_4;
			flctl->erase_ADRCNT = ADRCNT_2;
		} else {
			flctl->rw_ADRCNT = ADRCNT_3;
			flctl->erase_ADRCNT = ADRCNT_1;
		}
	}

	if (flctl->hwecc) {
		if (mtd->writesize == 512) {
			chip->ecc.layout = &flctl_4secc_oob_16;
			chip->badblock_pattern = &flctl_4secc_smallpage;
		} else {
			chip->ecc.layout = &flctl_4secc_oob_64;
			chip->badblock_pattern = &flctl_4secc_largepage;
		}

		chip->ecc.size = 512;
		chip->ecc.bytes = 10;
		chip->ecc.strength = 4;
		chip->ecc.read_page = flctl_read_page_hwecc;
		chip->ecc.write_page = flctl_write_page_hwecc;
		chip->ecc.mode = NAND_ECC_HW;

		/* 4 symbols ECC enabled */
		flctl->flcmncr_base |= _4ECCEN;
	} else {
		chip->ecc.mode = NAND_ECC_SOFT;
	}

	return 0;
}

static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
{
	struct sh_flctl *flctl = dev_id;

	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));

	return IRQ_HANDLED;
}

static int __devinit flctl_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct sh_flctl *flctl;
	struct mtd_info *flctl_mtd;
	struct nand_chip *nand;
	struct sh_flctl_platform_data *pdata;
	int ret = -ENXIO;
	int irq;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -EINVAL;
	}

	flctl = kzalloc(sizeof(struct sh_flctl), GFP_KERNEL);
	if (!flctl) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get I/O memory\n");
		goto err_iomap;
	}

	flctl->reg = ioremap(res->start, resource_size(res));
	if (flctl->reg == NULL) {
		dev_err(&pdev->dev, "failed to remap I/O memory\n");
		goto err_iomap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get flste irq data\n");
		goto err_flste;
	}

	ret = request_irq(irq, flctl_handle_flste, IRQF_SHARED, "flste", flctl);
	if (ret) {
		dev_err(&pdev->dev, "request interrupt failed.\n");
		goto err_flste;
	}

	platform_set_drvdata(pdev, flctl);
	flctl_mtd = &flctl->mtd;
	nand = &flctl->chip;
	flctl_mtd->priv = nand;
	flctl->pdev = pdev;
	flctl->hwecc = pdata->has_hwecc;
	flctl->holden = pdata->use_holden;
	flctl->flcmncr_base = pdata->flcmncr_val;
	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;

	/* Set address of hardware control function */
	/* 20 us command delay time */
	nand->chip_delay = 20;

	nand->read_byte = flctl_read_byte;
	nand->write_buf = flctl_write_buf;
	nand->read_buf = flctl_read_buf;
	nand->select_chip = flctl_select_chip;
	nand->cmdfunc = flctl_cmdfunc;

	if (pdata->flcmncr_val & SEL_16BIT) {
		nand->options |= NAND_BUSWIDTH_16;
		nand->read_word = flctl_read_word;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	flctl_setup_dma(flctl);

	ret = nand_scan_ident(flctl_mtd, 1, NULL);
	if (ret)
		goto err_chip;

	ret = flctl_chip_init_tail(flctl_mtd);
	if (ret)
		goto err_chip;

	ret = nand_scan_tail(flctl_mtd);
	if (ret)
		goto err_chip;

	mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);

	return 0;

err_chip:
	flctl_release_dma(flctl);
	pm_runtime_disable(&pdev->dev);
	free_irq(irq, flctl);
err_flste:
	iounmap(flctl->reg);
err_iomap:
	kfree(flctl);
	return ret;
}

static int __devexit flctl_remove(struct platform_device *pdev)
{
	struct sh_flctl *flctl = platform_get_drvdata(pdev);

	flctl_release_dma(flctl);
	nand_release(&flctl->mtd);
	pm_runtime_disable(&pdev->dev);
	free_irq(platform_get_irq(pdev, 0), flctl);
	iounmap(flctl->reg);
	kfree(flctl);

	return 0;
}

static struct platform_driver flctl_driver = {
	.remove		= flctl_remove,
	.driver = {
		.name	= "sh_flctl",
		.owner	= THIS_MODULE,
	},
};

static int __init flctl_nand_init(void)
{
	return platform_driver_probe(&flctl_driver, flctl_probe);
}

static void __exit flctl_nand_cleanup(void)
{
	platform_driver_unregister(&flctl_driver);
}

module_init(flctl_nand_init);
module_exit(flctl_nand_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");