/*
2 * Cloned most of the code from the m25p80.c
3 *
4 * This code is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/err.h>
10#include <linux/errno.h>
11#include <linux/module.h>
12#include <linux/device.h>
13#include <linux/mutex.h>
14#include <linux/math64.h>
15
16#include <linux/mtd/cfi.h>
17#include <linux/mtd/mtd.h>
18#include <linux/of_platform.h>
19#include <linux/spi/flash.h>
20#include <linux/mtd/spi-nor.h>
21
22/* Define max times to check status register before we give up. */
23#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
24
25#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
26
27/*
28 * Read the status register, returning its value in the location
29 * Return the status register value.
30 * Returns negative if error occurred.
31 */
32static int read_sr(struct spi_nor *nor)
33{
34 int ret;
35 u8 val;
36
37 ret = nor->read_reg(nor, OPCODE_RDSR, &val, 1);
38 if (ret < 0) {
39 pr_err("error %d reading SR\n", (int) ret);
40 return ret;
41 }
42
43 return val;
44}
45
46/*
47 * Read configuration register, returning its value in the
48 * location. Return the configuration register value.
 * Returns negative if error occurred.
50 */
51static int read_cr(struct spi_nor *nor)
52{
53 int ret;
54 u8 val;
55
56 ret = nor->read_reg(nor, OPCODE_RDCR, &val, 1);
57 if (ret < 0) {
58 dev_err(nor->dev, "error %d reading CR\n", ret);
59 return ret;
60 }
61
62 return val;
63}
64
65/*
66 * Dummy Cycle calculation for different type of read.
67 * It can be used to support more commands with
68 * different dummy cycle requirements.
69 */
70static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
71{
72 switch (nor->flash_read) {
73 case SPI_NOR_FAST:
74 case SPI_NOR_DUAL:
75 case SPI_NOR_QUAD:
76 return 1;
77 case SPI_NOR_NORMAL:
78 return 0;
79 }
80 return 0;
81}
82
83/*
84 * Write status register 1 byte
85 * Returns negative if error occurred.
86 */
static inline int write_sr(struct spi_nor *nor, u8 val)
{
	/* Stage the value in cmd_buf, then issue Write Status Register. */
	nor->cmd_buf[0] = val;
	return nor->write_reg(nor, OPCODE_WRSR, nor->cmd_buf, 1, 0);
}
92
93/*
94 * Set write enable latch with Write Enable command.
95 * Returns negative if error occurred.
96 */
static inline int write_enable(struct spi_nor *nor)
{
	/* WREN has no payload; the opcode alone sets the latch. */
	return nor->write_reg(nor, OPCODE_WREN, NULL, 0, 0);
}
101
102/*
 * Send write disable instruction to the chip.
104 */
static inline int write_disable(struct spi_nor *nor)
{
	/* WRDI has no payload; the opcode alone clears the latch. */
	return nor->write_reg(nor, OPCODE_WRDI, NULL, 0, 0);
}
109
/* Recover the owning spi_nor from an mtd_info (stored in mtd->priv). */
static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
{
	return mtd->priv;
}
114
115/* Enable/disable 4-byte addressing mode. */
static inline int set_4byte(struct spi_nor *nor, u32 jedec_id, int enable)
{
	int status;
	bool need_wren = false;
	u8 cmd;

	switch (JEDEC_MFR(jedec_id)) {
	case CFI_MFR_ST: /* Micron, actually */
		/* Some Micron need WREN command; all will accept it */
		need_wren = true;
		/* fall through */
	case CFI_MFR_MACRONIX:
	case 0xEF /* winbond */:
		if (need_wren)
			write_enable(nor);

		/* Dedicated enter/exit 4-byte mode opcodes. */
		cmd = enable ? OPCODE_EN4B : OPCODE_EX4B;
		status = nor->write_reg(nor, cmd, NULL, 0, 0);
		if (need_wren)
			write_disable(nor);

		return status;
	default:
		/* Spansion style: bit 7 of the Bank register selects the
		 * addressing mode instead of a dedicated opcode. */
		nor->cmd_buf[0] = enable << 7;
		return nor->write_reg(nor, OPCODE_BRWR, nor->cmd_buf, 1, 0);
	}
}
143
144static int spi_nor_wait_till_ready(struct spi_nor *nor)
145{
146 unsigned long deadline;
147 int sr;
148
149 deadline = jiffies + MAX_READY_WAIT_JIFFIES;
150
151 do {
152 cond_resched();
153
154 sr = read_sr(nor);
155 if (sr < 0)
156 break;
157 else if (!(sr & SR_WIP))
158 return 0;
159 } while (!time_after_eq(jiffies, deadline));
160
161 return -ETIMEDOUT;
162}
163
164/*
165 * Service routine to read status register until ready, or timeout occurs.
166 * Returns non-zero if error.
167 */
static int wait_till_ready(struct spi_nor *nor)
{
	/* Indirect through the hook so drivers can override the default
	 * (spi_nor_check() installs spi_nor_wait_till_ready when unset). */
	return nor->wait_till_ready(nor);
}
172
173/*
174 * Erase the whole flash memory
175 *
176 * Returns 0 if successful, non-zero otherwise.
177 */
static int erase_chip(struct spi_nor *nor)
{
	int ret;

	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd->size >> 10));

	/* Wait until finished previous write command. */
	ret = wait_till_ready(nor);
	if (ret)
		return ret;

	/* Send write enable, then erase commands. */
	/* NOTE(review): write_enable()'s return value is ignored here —
	 * presumably a failed WREN makes the erase a no-op; confirm. */
	write_enable(nor);

	return nor->write_reg(nor, OPCODE_CHIP_ERASE, NULL, 0, 0);
}
194
195static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
196{
197 int ret = 0;
198
199 mutex_lock(&nor->lock);
200
201 if (nor->prepare) {
202 ret = nor->prepare(nor, ops);
203 if (ret) {
204 dev_err(nor->dev, "failed in the preparation.\n");
205 mutex_unlock(&nor->lock);
206 return ret;
207 }
208 }
209 return ret;
210}
211
/* Run the optional unprepare hook, then release the nor lock. */
static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
	if (nor->unprepare)
		nor->unprepare(nor, ops);
	mutex_unlock(&nor->lock);
}
218
219/*
220 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
222 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr, len;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
			(long long)instr->len);

	/* The requested length must be a whole number of erase sectors. */
	div_u64_rem(instr->len, mtd->erasesize, &rem);
	if (rem)
		return -EINVAL;

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size) {
		if (erase_chip(nor)) {
			ret = -EIO;
			goto erase_err;
		}

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using OPCODE_SE instead of OPCODE_BE_4K. We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else {
		while (len) {
			if (nor->erase(nor, addr)) {
				ret = -EIO;
				goto erase_err;
			}

			addr += mtd->erasesize;
			len -= mtd->erasesize;
		}
	}

	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);

	/* Report completion to the MTD layer. */
	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return ret;

erase_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
	instr->state = MTD_ERASE_FAILED;
	return ret;
}
281
282static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
283{
284 struct spi_nor *nor = mtd_to_spi_nor(mtd);
285 uint32_t offset = ofs;
286 uint8_t status_old, status_new;
287 int ret = 0;
288
289 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
290 if (ret)
291 return ret;
292
293 /* Wait until finished previous command */
294 ret = wait_till_ready(nor);
295 if (ret)
296 goto err;
297
298 status_old = read_sr(nor);
299
300 if (offset < mtd->size - (mtd->size / 2))
301 status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
302 else if (offset < mtd->size - (mtd->size / 4))
303 status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
304 else if (offset < mtd->size - (mtd->size / 8))
305 status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
306 else if (offset < mtd->size - (mtd->size / 16))
307 status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
308 else if (offset < mtd->size - (mtd->size / 32))
309 status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
310 else if (offset < mtd->size - (mtd->size / 64))
311 status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
312 else
313 status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
314
315 /* Only modify protection if it will not unlock other areas */
316 if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) >
317 (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
318 write_enable(nor);
319 ret = write_sr(nor, status_new);
320 if (ret)
321 goto err;
322 }
323
324err:
325 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
326 return ret;
327}
328
329static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
330{
331 struct spi_nor *nor = mtd_to_spi_nor(mtd);
332 uint32_t offset = ofs;
333 uint8_t status_old, status_new;
334 int ret = 0;
335
336 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
337 if (ret)
338 return ret;
339
340 /* Wait until finished previous command */
341 ret = wait_till_ready(nor);
342 if (ret)
343 goto err;
344
345 status_old = read_sr(nor);
346
347 if (offset+len > mtd->size - (mtd->size / 64))
348 status_new = status_old & ~(SR_BP2 | SR_BP1 | SR_BP0);
349 else if (offset+len > mtd->size - (mtd->size / 32))
350 status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
351 else if (offset+len > mtd->size - (mtd->size / 16))
352 status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
353 else if (offset+len > mtd->size - (mtd->size / 8))
354 status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
355 else if (offset+len > mtd->size - (mtd->size / 4))
356 status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
357 else if (offset+len > mtd->size - (mtd->size / 2))
358 status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
359 else
360 status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
361
362 /* Only modify protection if it will not lock other areas */
363 if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) <
364 (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
365 write_enable(nor);
366 ret = write_sr(nor, status_new);
367 if (ret)
368 goto err;
369 }
370
371err:
372 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
373 return ret;
374}
375
struct flash_info {
	/* JEDEC id zero means "no ID" (most older chips); otherwise it has
	 * a high byte of zero plus three data bytes: the manufacturer id,
	 * then a two byte device id.
	 */
	u32 jedec_id;
	u16 ext_id;	/* extended device id; 0 matches anything */

	/* The size listed here is what works with OPCODE_SE, which isn't
	 * necessarily called a "sector" by the vendor.
	 */
	unsigned sector_size;
	u16 n_sectors;	/* device size = sector_size * n_sectors */

	u16 page_size;	/* program page size in bytes (256 via INFO()) */
	u16 addr_width;	/* address bytes; 0 = pick 3 or 4 by device size */

	u16 flags;
#define SECT_4K	0x01	/* OPCODE_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE	0x02	/* No erase command needed */
#define SST_WRITE	0x04	/* use SST byte programming */
#define SPI_NOR_NO_FR	0x08	/* Can't do fastread */
#define SECT_4K_PMC	0x10	/* OPCODE_BE_4K_PMC works uniformly */
#define SPI_NOR_DUAL_READ	0x20	/* Flash supports Dual Read */
#define SPI_NOR_QUAD_READ	0x40	/* Flash supports Quad Read */
};
402
/* Describe a JEDEC-identified part; page size defaults to 256 bytes. */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
	((kernel_ulong_t)&(struct flash_info) {				\
		.jedec_id = (_jedec_id),				\
		.ext_id = (_ext_id),					\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = 256,					\
		.flags = (_flags),					\
	})

/* Describe a non-JEDEC part with fully explicit geometry. */
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)	\
	((kernel_ulong_t)&(struct flash_info) {				\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = (_page_size),				\
		.addr_width = (_addr_width),				\
		.flags = (_flags),					\
	})
421
422/* NOTE: double check command sets and memory organization when you add
423 * more nor chips. This current list focusses on newer chips, which
 * have been converging on command sets which include JEDEC ID.
425 */
426const struct spi_device_id spi_nor_ids[] = {
427 /* Atmel -- some are (confusingly) marketed as "DataFlash" */
428 { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
429 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
430
431 { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
432 { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
433 { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
434
435 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
436 { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
437 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
438 { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
439
440 { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
441
442 /* EON -- en25xxx */
443 { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
444 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
445 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
446 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
447 { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
448 { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
449
450 /* ESMT */
451 { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
452
453 /* Everspin */
454 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
455 { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
456
457 /* GigaDevice */
458 { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
459 { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
460
461 /* Intel/Numonyx -- xxxs33b */
462 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
463 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
464 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
465
466 /* Macronix */
467 { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
468 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
469 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
470 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
471 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
472 { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
473 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
474 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
475 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
476 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
477 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
478 { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
479 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
480
481 /* Micron */
482 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
483 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
484 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
485 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
486 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
487
488 /* PMC */
489 { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
490 { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
491 { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
492
493 /* Spansion -- single (large) sector size only, at least
494 * for the chips listed here (without boot sectors).
495 */
496 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) },
497 { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
498 { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
499 { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
500 { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
501 { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
502 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
503 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
504 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
505 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
506 { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
507 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
508 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
509 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
510 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
511 { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
512 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
513 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
514
515 /* SST -- large erase sizes are "overlays", "sectors" are 4K */
516 { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
517 { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
518 { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
519 { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
520 { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
521 { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
522 { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
523 { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
524 { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
525
526 /* ST Microelectronics -- newer production may have feature updates */
527 { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
528 { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
529 { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
530 { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
531 { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
532 { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
533 { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
534 { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
535 { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
536 { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
537
538 { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
539 { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
540 { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
541 { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
542 { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
543 { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
544 { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
545 { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
546 { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
547
548 { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
549 { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
550 { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
551
552 { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
553 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
554 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
555
556 { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
557 { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
558 { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
559 { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
560 { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
561
562 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
563 { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
564 { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
565 { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
566 { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
567 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
568 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
569 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
570 { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
571 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
572 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
573 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
574 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
575 { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
576 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
577 { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
578
579 /* Catalyst / On Semiconductor -- non-JEDEC */
580 { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
581 { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
582 { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
583 { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
584 { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
585 { },
586};
587
588static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
589{
590 int tmp;
591 u8 id[5];
592 u32 jedec;
593 u16 ext_jedec;
594 struct flash_info *info;
595
596 tmp = nor->read_reg(nor, OPCODE_RDID, id, 5);
597 if (tmp < 0) {
598 dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp);
599 return ERR_PTR(tmp);
600 }
601 jedec = id[0];
602 jedec = jedec << 8;
603 jedec |= id[1];
604 jedec = jedec << 8;
605 jedec |= id[2];
606
607 ext_jedec = id[3] << 8 | id[4];
608
609 for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
610 info = (void *)spi_nor_ids[tmp].driver_data;
611 if (info->jedec_id == jedec) {
612 if (info->ext_id == 0 || info->ext_id == ext_jedec)
613 return &spi_nor_ids[tmp];
614 }
615 }
616 dev_err(nor->dev, "unrecognized JEDEC id %06x\n", jedec);
617 return ERR_PTR(-ENODEV);
618}
619
static const struct spi_device_id *jedec_probe(struct spi_nor *nor)
{
	/* Indirect through the hook; defaults to spi_nor_read_id(). */
	return nor->read_id(nor);
}
624
625static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
626 size_t *retlen, u_char *buf)
627{
628 struct spi_nor *nor = mtd_to_spi_nor(mtd);
629 int ret;
630
631 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
632
633 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
634 if (ret)
635 return ret;
636
637 ret = nor->read(nor, from, len, retlen, buf);
638
639 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
640 return ret;
641}
642
/*
 * MTD _write callback for SST parts: program with Byte-Program (BP) and
 * Auto-Address-Increment Word-Program (AAI_WP) instead of page program.
 */
static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t actual;
	int ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
	if (ret)
		return ret;

	/* Wait until finished previous write command. */
	ret = wait_till_ready(nor);
	if (ret)
		goto time_out;

	write_enable(nor);

	nor->sst_write_second = false;

	actual = to % 2;
	/* Start write from odd address. */
	if (actual) {
		nor->program_opcode = OPCODE_BP;

		/* write one byte. */
		nor->write(nor, to, 1, retlen, buf);
		ret = wait_till_ready(nor);
		if (ret)
			goto time_out;
	}
	to += actual;

	/* Write out most of the data here. */
	/* NOTE(review): if len == 0, "len - 1" wraps (size_t) — presumably
	 * the MTD layer never issues zero-length writes; confirm. */
	for (; actual < len - 1; actual += 2) {
		nor->program_opcode = OPCODE_AAI_WP;

		/* write two bytes. */
		nor->write(nor, to, 2, retlen, buf + actual);
		ret = wait_till_ready(nor);
		if (ret)
			goto time_out;
		to += 2;
		/* After the first AAI_WP, subsequent ones omit the address. */
		nor->sst_write_second = true;
	}
	nor->sst_write_second = false;

	/* End the AAI sequence with a write-disable before anything else. */
	write_disable(nor);
	ret = wait_till_ready(nor);
	if (ret)
		goto time_out;

	/* Write out trailing byte if it exists. */
	if (actual != len) {
		write_enable(nor);

		nor->program_opcode = OPCODE_BP;
		nor->write(nor, to, 1, retlen, buf + actual);

		ret = wait_till_ready(nor);
		if (ret)
			goto time_out;
		write_disable(nor);
	}
time_out:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
	return ret;
}
713
714/*
715 * Write an address range to the nor chip. Data must be written in
716 * FLASH_PAGESIZE chunks. The address range may be any size provided
717 * it is within the physical boundaries.
718 */
719static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
720 size_t *retlen, const u_char *buf)
721{
722 struct spi_nor *nor = mtd_to_spi_nor(mtd);
723 u32 page_offset, page_size, i;
724 int ret;
725
726 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
727
728 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
729 if (ret)
730 return ret;
731
732 /* Wait until finished previous write command. */
733 ret = wait_till_ready(nor);
734 if (ret)
735 goto write_err;
736
737 write_enable(nor);
738
739 page_offset = to & (nor->page_size - 1);
740
741 /* do all the bytes fit onto one page? */
742 if (page_offset + len <= nor->page_size) {
743 nor->write(nor, to, len, retlen, buf);
744 } else {
745 /* the size of data remaining on the first page */
746 page_size = nor->page_size - page_offset;
747 nor->write(nor, to, page_size, retlen, buf);
748
749 /* write everything in nor->page_size chunks */
750 for (i = page_size; i < len; i += page_size) {
751 page_size = len - i;
752 if (page_size > nor->page_size)
753 page_size = nor->page_size;
754
755 wait_till_ready(nor);
756 write_enable(nor);
757
758 nor->write(nor, to + i, page_size, retlen, buf + i);
759 }
760 }
761
762write_err:
763 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
764 return 0;
765}
766
767static int macronix_quad_enable(struct spi_nor *nor)
768{
769 int ret, val;
770
771 val = read_sr(nor);
772 write_enable(nor);
773
774 nor->cmd_buf[0] = val | SR_QUAD_EN_MX;
775 nor->write_reg(nor, OPCODE_WRSR, nor->cmd_buf, 1, 0);
776
777 if (wait_till_ready(nor))
778 return 1;
779
780 ret = read_sr(nor);
781 if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
782 dev_err(nor->dev, "Macronix Quad bit not set\n");
783 return -EINVAL;
784 }
785
786 return 0;
787}
788
789/*
790 * Write status Register and configuration register with 2 bytes
791 * The first byte will be written to the status register, while the
792 * second byte will be written to the configuration register.
 * Return negative if error occurred.
794 */
static int write_sr_cr(struct spi_nor *nor, u16 val)
{
	/* Low byte -> status register, high byte -> configuration register. */
	nor->cmd_buf[0] = val & 0xff;
	nor->cmd_buf[1] = (val >> 8);

	return nor->write_reg(nor, OPCODE_WRSR, nor->cmd_buf, 2, 0);
}
802
static int spansion_quad_enable(struct spi_nor *nor)
{
	int ret;
	/* Quad-enable lives in the CR, which is the high byte for WRSR. */
	int quad_en = CR_QUAD_EN_SPAN << 8;

	write_enable(nor);

	ret = write_sr_cr(nor, quad_en);
	if (ret < 0) {
		dev_err(nor->dev,
			"error while writing configuration register\n");
		return -EINVAL;
	}

	/* read back and check it */
	ret = read_cr(nor);
	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
		dev_err(nor->dev, "Spansion Quad bit not set\n");
		return -EINVAL;
	}

	return 0;
}
826
827static int set_quad_mode(struct spi_nor *nor, u32 jedec_id)
828{
829 int status;
830
831 switch (JEDEC_MFR(jedec_id)) {
832 case CFI_MFR_MACRONIX:
833 status = macronix_quad_enable(nor);
834 if (status) {
835 dev_err(nor->dev, "Macronix quad-read not enabled\n");
836 return -EINVAL;
837 }
838 return status;
839 default:
840 status = spansion_quad_enable(nor);
841 if (status) {
842 dev_err(nor->dev, "Spansion quad-read not enabled\n");
843 return -EINVAL;
844 }
845 return status;
846 }
847}
848
849static int spi_nor_check(struct spi_nor *nor)
850{
851 if (!nor->dev || !nor->read || !nor->write ||
852 !nor->read_reg || !nor->write_reg || !nor->erase) {
853 pr_err("spi-nor: please fill all the necessary fields!\n");
854 return -EINVAL;
855 }
856
857 if (!nor->read_id)
858 nor->read_id = spi_nor_read_id;
859 if (!nor->wait_till_ready)
860 nor->wait_till_ready = spi_nor_wait_till_ready;
861
862 return 0;
863}
864
/*
 * spi_nor_scan() - identify the flash part and set up nor/mtd state
 * @nor:  spi_nor with the controller hooks already filled in
 * @id:   caller-selected table entry (may be overridden by JEDEC probe)
 * @mode: requested read mode (normal/fast/dual/quad)
 *
 * Returns 0 on success or a negative error code.
 */
int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id,
			enum read_mode mode)
{
	struct flash_info *info;
	struct flash_platform_data *data;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = nor->mtd;
	struct device_node *np = dev->of_node;
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Platform data helps sort out which chip type we have, as
	 * well as how this board partitions it. If we don't have
	 * a chip ID, try the JEDEC id commands; they'll work for most
	 * newer chips, even if we don't recognize the particular chip.
	 */
	data = dev_get_platdata(dev);
	if (data && data->type) {
		const struct spi_device_id *plat_id;

		for (i = 0; i < ARRAY_SIZE(spi_nor_ids) - 1; i++) {
			plat_id = &spi_nor_ids[i];
			if (strcmp(data->type, plat_id->name))
				continue;
			break;
		}

		if (i < ARRAY_SIZE(spi_nor_ids) - 1)
			id = plat_id;
		else
			dev_warn(dev, "unrecognized id %s\n", data->type);
	}

	info = (void *)id->driver_data;

	if (info->jedec_id) {
		const struct spi_device_id *jid;

		jid = jedec_probe(nor);
		if (IS_ERR(jid)) {
			return PTR_ERR(jid);
		} else if (jid != id) {
			/*
			 * JEDEC knows better, so overwrite platform ID. We
			 * can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(dev, "found %s, expected %s\n",
				 jid->name, id->name);
			id = jid;
			info = (void *)jid->driver_data;
		}
	}

	mutex_init(&nor->lock);

	/*
	 * Atmel, SST and Intel/Numonyx serial nor tend to power
	 * up with the software protection bits set
	 */

	if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
	    JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
	    JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
		/* Clear the status register to unprotect the array. */
		write_enable(nor);
		write_sr(nor, 0);
	}

	if (data && data->name)
		mtd->name = data->name;
	else
		mtd->name = dev_name(dev);

	/* Populate the generic MTD description and callbacks. */
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = info->sector_size * info->n_sectors;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;

	/* nor protection support for STmicro chips */
	if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
	}

	/* sst nor chips use AAI word program */
	if (info->flags & SST_WRITE)
		mtd->_write = sst_write;
	else
		mtd->_write = spi_nor_write;

	/* prefer "small sector" erase if possible */
	if (info->flags & SECT_4K) {
		nor->erase_opcode = OPCODE_BE_4K;
		mtd->erasesize = 4096;
	} else if (info->flags & SECT_4K_PMC) {
		nor->erase_opcode = OPCODE_BE_4K_PMC;
		mtd->erasesize = 4096;
	} else {
		nor->erase_opcode = OPCODE_SE;
		mtd->erasesize = info->sector_size;
	}

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = info->page_size;
	mtd->writebufsize = nor->page_size;

	if (np) {
		/* If we were instantiated by DT, use it */
		if (of_property_read_bool(np, "m25p,fast-read"))
			nor->flash_read = SPI_NOR_FAST;
		else
			nor->flash_read = SPI_NOR_NORMAL;
	} else {
		/* If we weren't instantiated by DT, default to fast-read */
		nor->flash_read = SPI_NOR_FAST;
	}

	/* Some devices cannot do fast-read, no matter what DT tells us */
	if (info->flags & SPI_NOR_NO_FR)
		nor->flash_read = SPI_NOR_NORMAL;

	/* Quad/Dual-read mode takes precedence over fast/normal */
	if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
		ret = set_quad_mode(nor, info->jedec_id);
		if (ret) {
			dev_err(dev, "quad mode not supported\n");
			return ret;
		}
		nor->flash_read = SPI_NOR_QUAD;
	} else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
		nor->flash_read = SPI_NOR_DUAL;
	}

	/* Default commands */
	switch (nor->flash_read) {
	case SPI_NOR_QUAD:
		nor->read_opcode = OPCODE_QUAD_READ;
		break;
	case SPI_NOR_DUAL:
		nor->read_opcode = OPCODE_DUAL_READ;
		break;
	case SPI_NOR_FAST:
		nor->read_opcode = OPCODE_FAST_READ;
		break;
	case SPI_NOR_NORMAL:
		nor->read_opcode = OPCODE_NORM_READ;
		break;
	default:
		dev_err(dev, "No Read opcode defined\n");
		return -EINVAL;
	}

	nor->program_opcode = OPCODE_PP;

	if (info->addr_width)
		nor->addr_width = info->addr_width;
	else if (mtd->size > 0x1000000) {
		/* enable 4-byte addressing if the device exceeds 16MiB */
		nor->addr_width = 4;
		if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
			/* Dedicated 4-byte command set */
			switch (nor->flash_read) {
			case SPI_NOR_QUAD:
				nor->read_opcode = OPCODE_QUAD_READ_4B;
				break;
			case SPI_NOR_DUAL:
				nor->read_opcode = OPCODE_DUAL_READ_4B;
				break;
			case SPI_NOR_FAST:
				nor->read_opcode = OPCODE_FAST_READ_4B;
				break;
			case SPI_NOR_NORMAL:
				nor->read_opcode = OPCODE_NORM_READ_4B;
				break;
			}
			nor->program_opcode = OPCODE_PP_4B;
			/* No small sector erase for 4-byte command set */
			nor->erase_opcode = OPCODE_SE_4B;
			mtd->erasesize = info->sector_size;
		} else
			set_4byte(nor, info->jedec_id, 1);
	} else {
		nor->addr_width = 3;
	}

	nor->read_dummy = spi_nor_read_dummy_cycles(nor);

	dev_info(dev, "%s (%lld Kbytes)\n", id->name,
			(long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
1084
Huang Shijie0d8c11c2014-02-24 18:37:40 +08001085const struct spi_device_id *spi_nor_match_id(char *name)
1086{
1087 const struct spi_device_id *id = spi_nor_ids;
1088
1089 while (id->name[0]) {
1090 if (!strcmp(name, id->name))
1091 return id;
1092 id++;
1093 }
1094 return NULL;
1095}
1096
MODULE_LICENSE("GPL");
1098MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
1099MODULE_AUTHOR("Mike Lavender");
1100MODULE_DESCRIPTION("framework for SPI NOR");