1/*
2 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
3 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
4 *
5 * Copyright (C) 2005, Intec Automation Inc.
6 * Copyright (C) 2014, Freescale Semiconductor, Inc.
7 *
8 * This code is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/err.h>
14#include <linux/errno.h>
15#include <linux/module.h>
16#include <linux/device.h>
17#include <linux/mutex.h>
18#include <linux/math64.h>
19
20#include <linux/mtd/cfi.h>
21#include <linux/mtd/mtd.h>
22#include <linux/of_platform.h>
23#include <linux/spi/flash.h>
24#include <linux/mtd/spi-nor.h>
25
26/* Define max times to check status register before we give up. */
27#define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */
28
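/* The JEDEC manufacturer ID is the top byte of the 3-byte JEDEC ID. */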
29#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
30
31/*
32 * Read the status register.
33 * Returns the status register value on success,
34 * or negative if an error occurred.
35 */
36static int read_sr(struct spi_nor *nor)
37{
38 int ret;
39 u8 val;
40
41 ret = nor->read_reg(nor, OPCODE_RDSR, &val, 1);
42 if (ret < 0) {
43 pr_err("error %d reading SR\n", (int) ret);
44 return ret;
45 }
46
47 return val;
48}
49
50/*
51 * Read the configuration register.
52 * Returns the configuration register value on success,
53 * or negative if an error occurred.
54 */
55static int read_cr(struct spi_nor *nor)
56{
57 int ret;
58 u8 val;
59
60 ret = nor->read_reg(nor, OPCODE_RDCR, &val, 1);
61 if (ret < 0) {
62 dev_err(nor->dev, "error %d reading CR\n", ret);
63 return ret;
64 }
65
66 return val;
67}
68
69/*
70 * Dummy Cycle calculation for different type of read.
71 * It can be used to support more commands with
72 * different dummy cycle requirements.
73 */
74static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
75{
76 switch (nor->flash_read) {
77 case SPI_NOR_FAST:
78 case SPI_NOR_DUAL:
79 case SPI_NOR_QUAD:
80 return 1;
81 case SPI_NOR_NORMAL:
82 return 0;
83 }
84 return 0;
85}
86
87/*
88 * Write the status register with one byte.
89 * Returns negative if error occurred.
90 */
91static inline int write_sr(struct spi_nor *nor, u8 val)
92{
93 nor->cmd_buf[0] = val;
94 return nor->write_reg(nor, OPCODE_WRSR, nor->cmd_buf, 1, 0);
95}
96
97/*
98 * Set write enable latch with Write Enable command.
99 * Returns negative if error occurred.
100 */
101static inline int write_enable(struct spi_nor *nor)
102{
103 return nor->write_reg(nor, OPCODE_WREN, NULL, 0, 0);
104}
105
106/*
107 * Send the write disable instruction to the chip.
108 */
109static inline int write_disable(struct spi_nor *nor)
110{
111 return nor->write_reg(nor, OPCODE_WRDI, NULL, 0, 0);
112}
113
114static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
115{
116 return mtd->priv;
117}
118
119/* Enable/disable 4-byte addressing mode. */
120static inline int set_4byte(struct spi_nor *nor, u32 jedec_id, int enable)
121{
122 int status;
123 bool need_wren = false;
124 u8 cmd;
125
126 switch (JEDEC_MFR(jedec_id)) {
127 case CFI_MFR_ST: /* Micron, actually */
127 /* Some Micron chips need the WREN command; all will accept it */
129 need_wren = true;
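		/* fall through */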
130 case CFI_MFR_MACRONIX:
131 case 0xEF /* winbond */:
132 if (need_wren)
133 write_enable(nor);
134
135 cmd = enable ? OPCODE_EN4B : OPCODE_EX4B;
136 status = nor->write_reg(nor, cmd, NULL, 0, 0);
137 if (need_wren)
138 write_disable(nor);
139
140 return status;
141 default:
142 /* Spansion style */
143 nor->cmd_buf[0] = enable << 7;
144 return nor->write_reg(nor, OPCODE_BRWR, nor->cmd_buf, 1, 0);
145 }
146}
147
148static int spi_nor_wait_till_ready(struct spi_nor *nor)
149{
150 unsigned long deadline;
151 int sr;
152
153 deadline = jiffies + MAX_READY_WAIT_JIFFIES;
154
155 do {
156 cond_resched();
157
158 sr = read_sr(nor);
159 if (sr < 0)
160 break;
161 else if (!(sr & SR_WIP))
162 return 0;
163 } while (!time_after_eq(jiffies, deadline));
164
165 return -ETIMEDOUT;
166}
167
168/*
169 * Service routine to read status register until ready, or timeout occurs.
170 * Returns non-zero on error.
171 */
172static int wait_till_ready(struct spi_nor *nor)
173{
174 return nor->wait_till_ready(nor);
175}
176
177/*
178 * Erase the whole flash memory
179 *
180 * Returns 0 if successful, non-zero otherwise.
181 */
182static int erase_chip(struct spi_nor *nor)
183{
184 int ret;
185
186 dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd->size >> 10));
187
188 /* Wait until finished previous write command. */
189 ret = wait_till_ready(nor);
190 if (ret)
191 return ret;
192
193 /* Send write enable, then erase commands. */
194 write_enable(nor);
195
196 return nor->write_reg(nor, OPCODE_CHIP_ERASE, NULL, 0, 0);
197}
198
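/*
 * Serialize flash access and let the hardware driver claim the bus
 * (via nor->prepare, if provided) before an operation starts.
 */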
199static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
200{
201 int ret = 0;
202
203 mutex_lock(&nor->lock);
204
205 if (nor->prepare) {
206 ret = nor->prepare(nor, ops);
207 if (ret) {
208 dev_err(nor->dev, "failed in the preparation.\n");
209 mutex_unlock(&nor->lock);
210 return ret;
211 }
212 }
213 return ret;
214}
215
216static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
217{
218 if (nor->unprepare)
219 nor->unprepare(nor, ops);
220 mutex_unlock(&nor->lock);
221}
222
223/*
224 * Erase an address range on the nor chip. The address range may span
225 * one or more erase sectors. Return an error if there is a problem erasing.
226 */
227static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
228{
229 struct spi_nor *nor = mtd_to_spi_nor(mtd);
230 u32 addr, len;
231 uint32_t rem;
232 int ret;
233
234 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
235 (long long)instr->len);
236
237 div_u64_rem(instr->len, mtd->erasesize, &rem);
238 if (rem)
239 return -EINVAL;
240
241 addr = instr->addr;
242 len = instr->len;
243
244 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
245 if (ret)
246 return ret;
247
248 /* whole-chip erase? */
249 if (len == mtd->size) {
250 if (erase_chip(nor)) {
251 ret = -EIO;
252 goto erase_err;
253 }
254
255 /* REVISIT in some cases we could speed up erasing large regions
256 * by using OPCODE_SE instead of OPCODE_BE_4K. We may have set up
257 * to use "small sector erase", but that's not always optimal.
258 */
259
260 /* "sector"-at-a-time erase */
261 } else {
262 while (len) {
263 if (nor->erase(nor, addr)) {
264 ret = -EIO;
265 goto erase_err;
266 }
267
268 addr += mtd->erasesize;
269 len -= mtd->erasesize;
270 }
271 }
272
273 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
274
275 instr->state = MTD_ERASE_DONE;
276 mtd_erase_callback(instr);
277
278 return ret;
279
280erase_err:
281 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
282 instr->state = MTD_ERASE_FAILED;
283 return ret;
284}
285
286static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
287{
288 struct spi_nor *nor = mtd_to_spi_nor(mtd);
289 uint32_t offset = ofs;
290 uint8_t status_old, status_new;
291 int ret = 0;
292
293 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
294 if (ret)
295 return ret;
296
297 /* Wait until finished previous command */
298 ret = wait_till_ready(nor);
299 if (ret)
300 goto err;
301
302 status_old = read_sr(nor);
303
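	/*
	 * The BP2..BP0 status bits protect a power-of-two fraction of the
	 * top of the flash: 1/64, 1/32, ... 1/2 or the whole array.  Pick
	 * the smallest protected region that still covers everything from
	 * 'offset' to the end of the chip.
	 */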
304 if (offset < mtd->size - (mtd->size / 2))
305 status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
306 else if (offset < mtd->size - (mtd->size / 4))
307 status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
308 else if (offset < mtd->size - (mtd->size / 8))
309 status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
310 else if (offset < mtd->size - (mtd->size / 16))
311 status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
312 else if (offset < mtd->size - (mtd->size / 32))
313 status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
314 else if (offset < mtd->size - (mtd->size / 64))
315 status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
316 else
317 status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
318
319 /* Only modify protection if it will not unlock other areas */
320 if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) >
321 (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
322 write_enable(nor);
323 ret = write_sr(nor, status_new);
324 if (ret)
325 goto err;
326 }
327
328err:
329 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
330 return ret;
331}
332
333static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
334{
335 struct spi_nor *nor = mtd_to_spi_nor(mtd);
336 uint32_t offset = ofs;
337 uint8_t status_old, status_new;
338 int ret = 0;
339
340 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
341 if (ret)
342 return ret;
343
344 /* Wait until finished previous command */
345 ret = wait_till_ready(nor);
346 if (ret)
347 goto err;
348
349 status_old = read_sr(nor);
350
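	/*
	 * Same BP2..BP0 encoding as in spi_nor_lock(): keep the largest
	 * top-of-flash protected region that no longer overlaps the range
	 * being unlocked, so nothing is unprotected unnecessarily.
	 */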
351 if (offset+len > mtd->size - (mtd->size / 64))
352 status_new = status_old & ~(SR_BP2 | SR_BP1 | SR_BP0);
353 else if (offset+len > mtd->size - (mtd->size / 32))
354 status_new = (status_old & ~(SR_BP2 | SR_BP1)) | SR_BP0;
355 else if (offset+len > mtd->size - (mtd->size / 16))
356 status_new = (status_old & ~(SR_BP2 | SR_BP0)) | SR_BP1;
357 else if (offset+len > mtd->size - (mtd->size / 8))
358 status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
359 else if (offset+len > mtd->size - (mtd->size / 4))
360 status_new = (status_old & ~(SR_BP0 | SR_BP1)) | SR_BP2;
361 else if (offset+len > mtd->size - (mtd->size / 2))
362 status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
363 else
364 status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
365
366 /* Only modify protection if it will not lock other areas */
367 if ((status_new & (SR_BP2 | SR_BP1 | SR_BP0)) <
368 (status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
369 write_enable(nor);
370 ret = write_sr(nor, status_new);
371 if (ret)
372 goto err;
373 }
374
375err:
376 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
377 return ret;
378}
379
380struct flash_info {
381 /* JEDEC id zero means "no ID" (most older chips); otherwise it has
382 * a high byte of zero plus three data bytes: the manufacturer id,
383 * then a two byte device id.
384 */
385 u32 jedec_id;
386 u16 ext_id;
387
388 /* The size listed here is what works with OPCODE_SE, which isn't
389 * necessarily called a "sector" by the vendor.
390 */
391 unsigned sector_size;
392 u16 n_sectors;
393
394 u16 page_size;
395 u16 addr_width;
396
397 u16 flags;
398#define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */
399#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
400#define SST_WRITE 0x04 /* use SST byte programming */
401#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
402#define SECT_4K_PMC 0x10 /* OPCODE_BE_4K_PMC works uniformly */
403#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
404#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
405};
406
407#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
408 ((kernel_ulong_t)&(struct flash_info) { \
409 .jedec_id = (_jedec_id), \
410 .ext_id = (_ext_id), \
411 .sector_size = (_sector_size), \
412 .n_sectors = (_n_sectors), \
413 .page_size = 256, \
414 .flags = (_flags), \
415 })
416
417#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \
418 ((kernel_ulong_t)&(struct flash_info) { \
419 .sector_size = (_sector_size), \
420 .n_sectors = (_n_sectors), \
421 .page_size = (_page_size), \
422 .addr_width = (_addr_width), \
423 .flags = (_flags), \
424 })
425
426/* NOTE: double check command sets and memory organization when you add
427 * more nor chips. This current list focuses on newer chips, which
428 * have been converging on command sets that include JEDEC ID.
429 */
430const struct spi_device_id spi_nor_ids[] = {
431 /* Atmel -- some are (confusingly) marketed as "DataFlash" */
432 { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
433 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
434
435 { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
436 { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
437 { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
438
439 { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
440 { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
441 { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
442 { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
443
444 { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
445
446 /* EON -- en25xxx */
447 { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
448 { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
449 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
450 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
451 { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
452 { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
453
454 /* ESMT */
455 { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
456
457 /* Everspin */
458 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
459 { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
460
461 /* GigaDevice */
462 { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
463 { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
464
465 /* Intel/Numonyx -- xxxs33b */
466 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
467 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
468 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
469
470 /* Macronix */
471 { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
472 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
473 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
474 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
475 { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
476 { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
477 { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
478 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
479 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
480 { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
481 { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
482 { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
483 { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
484
485 /* Micron */
486 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },
487 { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
488 { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
489 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
490 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
491
492 /* PMC */
493 { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
494 { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
495 { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
496
497 /* Spansion -- single (large) sector size only, at least
498 * for the chips listed here (without boot sectors).
499 */
500 { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) },
501 { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
502 { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
503 { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
504 { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
505 { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
506 { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
507 { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
508 { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
509 { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
510 { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
511 { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
512 { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
513 { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
514 { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
515 { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
516 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
517 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
518
519 /* SST -- large erase sizes are "overlays", "sectors" are 4K */
520 { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
521 { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
522 { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
523 { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
524 { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
525 { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
526 { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
527 { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
528 { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
529
530 /* ST Microelectronics -- newer production may have feature updates */
531 { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
532 { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
533 { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
534 { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
535 { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
536 { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
537 { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
538 { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
539 { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
540 { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
541
542 { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
543 { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
544 { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
545 { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
546 { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
547 { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
548 { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
549 { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
550 { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },
551
552 { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
553 { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
554 { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },
555
556 { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
557 { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
558 { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },
559
560 { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) },
561 { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
562 { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
563 { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
564 { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },
565
566 /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
567 { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
568 { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
569 { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
570 { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
571 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
572 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
573 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
574 { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
575 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
576 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
577 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
578 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
579 { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
580 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
581 { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },
582
583 /* Catalyst / On Semiconductor -- non-JEDEC */
584 { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
585 { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
586 { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
587 { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
588 { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
589 { },
590};
591EXPORT_SYMBOL_GPL(spi_nor_ids);
592
593static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
594{
595 int tmp;
596 u8 id[5];
597 u32 jedec;
598 u16 ext_jedec;
599 struct flash_info *info;
600
601 tmp = nor->read_reg(nor, OPCODE_RDID, id, 5);
602 if (tmp < 0) {
603 dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp);
604 return ERR_PTR(tmp);
605 }
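	/*
	 * id[0..2] form the 3-byte JEDEC ID (manufacturer ID plus two
	 * device ID bytes); id[3..4] carry the optional extended ID.
	 */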
606 jedec = id[0];
607 jedec = jedec << 8;
608 jedec |= id[1];
609 jedec = jedec << 8;
610 jedec |= id[2];
611
612 ext_jedec = id[3] << 8 | id[4];
613
614 for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
615 info = (void *)spi_nor_ids[tmp].driver_data;
616 if (info->jedec_id == jedec) {
617 if (info->ext_id == 0 || info->ext_id == ext_jedec)
618 return &spi_nor_ids[tmp];
619 }
620 }
621 dev_err(nor->dev, "unrecognized JEDEC id %06x\n", jedec);
622 return ERR_PTR(-ENODEV);
623}
624
625static const struct spi_device_id *jedec_probe(struct spi_nor *nor)
626{
627 return nor->read_id(nor);
628}
629
630static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
631 size_t *retlen, u_char *buf)
632{
633 struct spi_nor *nor = mtd_to_spi_nor(mtd);
634 int ret;
635
636 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
637
638 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
639 if (ret)
640 return ret;
641
642 ret = nor->read(nor, from, len, retlen, buf);
643
644 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
645 return ret;
646}
647
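/*
 * SST chips flagged SST_WRITE do not use Page Program.  A leading or
 * trailing odd byte is written with Byte-Program (OPCODE_BP); the rest
 * of the buffer is written two bytes at a time using Auto Address
 * Increment word programming (OPCODE_AAI_WP).
 */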
648static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
649 size_t *retlen, const u_char *buf)
650{
651 struct spi_nor *nor = mtd_to_spi_nor(mtd);
652 size_t actual;
653 int ret;
654
655 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
656
657 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
658 if (ret)
659 return ret;
660
661 /* Wait until finished previous write command. */
662 ret = wait_till_ready(nor);
663 if (ret)
664 goto time_out;
665
666 write_enable(nor);
667
668 nor->sst_write_second = false;
669
670 actual = to % 2;
671 /* Start write from odd address. */
672 if (actual) {
673 nor->program_opcode = OPCODE_BP;
674
675 /* write one byte. */
676 nor->write(nor, to, 1, retlen, buf);
677 ret = wait_till_ready(nor);
678 if (ret)
679 goto time_out;
680 }
681 to += actual;
682
683 /* Write out most of the data here. */
684 for (; actual < len - 1; actual += 2) {
685 nor->program_opcode = OPCODE_AAI_WP;
686
687 /* write two bytes. */
688 nor->write(nor, to, 2, retlen, buf + actual);
689 ret = wait_till_ready(nor);
690 if (ret)
691 goto time_out;
692 to += 2;
693 nor->sst_write_second = true;
694 }
695 nor->sst_write_second = false;
696
697 write_disable(nor);
698 ret = wait_till_ready(nor);
699 if (ret)
700 goto time_out;
701
702 /* Write out trailing byte if it exists. */
703 if (actual != len) {
704 write_enable(nor);
705
706 nor->program_opcode = OPCODE_BP;
707 nor->write(nor, to, 1, retlen, buf + actual);
708
709 ret = wait_till_ready(nor);
710 if (ret)
711 goto time_out;
712 write_disable(nor);
713 }
714time_out:
715 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
716 return ret;
717}
718
719/*
720 * Write an address range to the nor chip. Data must be written in
721 * FLASH_PAGESIZE chunks. The address range may be any size provided
722 * it is within the physical boundaries.
723 */
724static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
725 size_t *retlen, const u_char *buf)
726{
727 struct spi_nor *nor = mtd_to_spi_nor(mtd);
728 u32 page_offset, page_size, i;
729 int ret;
730
731 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
732
733 ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
734 if (ret)
735 return ret;
736
737 /* Wait until finished previous write command. */
738 ret = wait_till_ready(nor);
739 if (ret)
740 goto write_err;
741
742 write_enable(nor);
743
744 page_offset = to & (nor->page_size - 1);
745
746 /* do all the bytes fit onto one page? */
747 if (page_offset + len <= nor->page_size) {
748 nor->write(nor, to, len, retlen, buf);
749 } else {
750 /* the size of data remaining on the first page */
751 page_size = nor->page_size - page_offset;
752 nor->write(nor, to, page_size, retlen, buf);
753
754 /* write everything in nor->page_size chunks */
755 for (i = page_size; i < len; i += page_size) {
756 page_size = len - i;
757 if (page_size > nor->page_size)
758 page_size = nor->page_size;
759
760 wait_till_ready(nor);
761 write_enable(nor);
762
763 nor->write(nor, to + i, page_size, retlen, buf + i);
764 }
765 }
766
767write_err:
768 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
769	return ret;
770}
771
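/*
 * Enable Quad I/O on Macronix chips: set the QE bit in the status
 * register, then read it back to confirm the bit is set.
 */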
772static int macronix_quad_enable(struct spi_nor *nor)
773{
774 int ret, val;
775
776 val = read_sr(nor);
777 write_enable(nor);
778
779 nor->cmd_buf[0] = val | SR_QUAD_EN_MX;
780 nor->write_reg(nor, OPCODE_WRSR, nor->cmd_buf, 1, 0);
781
782 if (wait_till_ready(nor))
783 return 1;
784
785 ret = read_sr(nor);
786 if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
787 dev_err(nor->dev, "Macronix Quad bit not set\n");
788 return -EINVAL;
789 }
790
791 return 0;
792}
793
794/*
795 * Write the status register and configuration register with 2 bytes.
796 * The first byte will be written to the status register, while the
797 * second byte will be written to the configuration register.
798 * Return negative if an error occurred.
799 */
800static int write_sr_cr(struct spi_nor *nor, u16 val)
801{
802 nor->cmd_buf[0] = val & 0xff;
803 nor->cmd_buf[1] = (val >> 8);
804
805 return nor->write_reg(nor, OPCODE_WRSR, nor->cmd_buf, 2, 0);
806}
807
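/*
 * Enable Quad I/O on Spansion chips: set the QUAD bit in the
 * configuration register, then read CR back to confirm.
 */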
808static int spansion_quad_enable(struct spi_nor *nor)
809{
810 int ret;
811 int quad_en = CR_QUAD_EN_SPAN << 8;
812
813 write_enable(nor);
814
815 ret = write_sr_cr(nor, quad_en);
816 if (ret < 0) {
817 dev_err(nor->dev,
818 "error while writing configuration register\n");
819 return -EINVAL;
820 }
821
822 /* read back and check it */
823 ret = read_cr(nor);
824 if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
825 dev_err(nor->dev, "Spansion Quad bit not set\n");
826 return -EINVAL;
827 }
828
829 return 0;
830}
831
832static int set_quad_mode(struct spi_nor *nor, u32 jedec_id)
833{
834 int status;
835
836 switch (JEDEC_MFR(jedec_id)) {
837 case CFI_MFR_MACRONIX:
838 status = macronix_quad_enable(nor);
839 if (status) {
840 dev_err(nor->dev, "Macronix quad-read not enabled\n");
841 return -EINVAL;
842 }
843 return status;
844 default:
845 status = spansion_quad_enable(nor);
846 if (status) {
847 dev_err(nor->dev, "Spansion quad-read not enabled\n");
848 return -EINVAL;
849 }
850 return status;
851 }
852}
853
854static int spi_nor_check(struct spi_nor *nor)
855{
856 if (!nor->dev || !nor->read || !nor->write ||
857 !nor->read_reg || !nor->write_reg || !nor->erase) {
858 pr_err("spi-nor: please fill all the necessary fields!\n");
859 return -EINVAL;
860 }
861
862 if (!nor->read_id)
863 nor->read_id = spi_nor_read_id;
864 if (!nor->wait_till_ready)
865 nor->wait_till_ready = spi_nor_wait_till_ready;
866
867 return 0;
868}
869
870int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id,
871 enum read_mode mode)
872{
873 struct flash_info *info;
874 struct flash_platform_data *data;
875 struct device *dev = nor->dev;
876 struct mtd_info *mtd = nor->mtd;
877 struct device_node *np = dev->of_node;
878 int ret;
879 int i;
880
881 ret = spi_nor_check(nor);
882 if (ret)
883 return ret;
884
885 /* Platform data helps sort out which chip type we have, as
886 * well as how this board partitions it. If we don't have
887 * a chip ID, try the JEDEC id commands; they'll work for most
888 * newer chips, even if we don't recognize the particular chip.
889 */
890 data = dev_get_platdata(dev);
891 if (data && data->type) {
892 const struct spi_device_id *plat_id;
893
894 for (i = 0; i < ARRAY_SIZE(spi_nor_ids) - 1; i++) {
895 plat_id = &spi_nor_ids[i];
896 if (strcmp(data->type, plat_id->name))
897 continue;
898 break;
899 }
900
901 if (i < ARRAY_SIZE(spi_nor_ids) - 1)
902 id = plat_id;
903 else
904 dev_warn(dev, "unrecognized id %s\n", data->type);
905 }
906
907 info = (void *)id->driver_data;
908
909 if (info->jedec_id) {
910 const struct spi_device_id *jid;
911
912 jid = jedec_probe(nor);
913 if (IS_ERR(jid)) {
914 return PTR_ERR(jid);
915 } else if (jid != id) {
916 /*
917 * JEDEC knows better, so overwrite platform ID. We
918 * can't trust partitions any longer, but we'll let
919 * mtd apply them anyway, since some partitions may be
920 * marked read-only, and we don't want to lose that
921 * information, even if it's not 100% accurate.
922 */
923 dev_warn(dev, "found %s, expected %s\n",
924 jid->name, id->name);
925 id = jid;
926 info = (void *)jid->driver_data;
927 }
928 }
929
930 mutex_init(&nor->lock);
931
932 /*
933 * Atmel, SST and Intel/Numonyx serial nor tend to power
934 * up with the software protection bits set
935 */
936
937 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL ||
938 JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL ||
939 JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) {
940 write_enable(nor);
941 write_sr(nor, 0);
942 }
943
944 if (data && data->name)
945 mtd->name = data->name;
946 else
947 mtd->name = dev_name(dev);
948
949 mtd->type = MTD_NORFLASH;
950 mtd->writesize = 1;
951 mtd->flags = MTD_CAP_NORFLASH;
952 mtd->size = info->sector_size * info->n_sectors;
953 mtd->_erase = spi_nor_erase;
954 mtd->_read = spi_nor_read;
955
956 /* nor protection support for STmicro chips */
957 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
958 mtd->_lock = spi_nor_lock;
959 mtd->_unlock = spi_nor_unlock;
960 }
961
962 /* sst nor chips use AAI word program */
963 if (info->flags & SST_WRITE)
964 mtd->_write = sst_write;
965 else
966 mtd->_write = spi_nor_write;
967
968 /* prefer "small sector" erase if possible */
969 if (info->flags & SECT_4K) {
970 nor->erase_opcode = OPCODE_BE_4K;
971 mtd->erasesize = 4096;
972 } else if (info->flags & SECT_4K_PMC) {
973 nor->erase_opcode = OPCODE_BE_4K_PMC;
974 mtd->erasesize = 4096;
975 } else {
976 nor->erase_opcode = OPCODE_SE;
977 mtd->erasesize = info->sector_size;
978 }
979
980 if (info->flags & SPI_NOR_NO_ERASE)
981 mtd->flags |= MTD_NO_ERASE;
982
983 mtd->dev.parent = dev;
984 nor->page_size = info->page_size;
985 mtd->writebufsize = nor->page_size;
986
987 if (np) {
988 /* If we were instantiated by DT, use it */
989 if (of_property_read_bool(np, "m25p,fast-read"))
990 nor->flash_read = SPI_NOR_FAST;
991 else
992 nor->flash_read = SPI_NOR_NORMAL;
993 } else {
994 /* If we weren't instantiated by DT, default to fast-read */
995 nor->flash_read = SPI_NOR_FAST;
996 }
997
998 /* Some devices cannot do fast-read, no matter what DT tells us */
999 if (info->flags & SPI_NOR_NO_FR)
1000 nor->flash_read = SPI_NOR_NORMAL;
1001
1002 /* Quad/Dual-read mode takes precedence over fast/normal */
1003 if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
1004 ret = set_quad_mode(nor, info->jedec_id);
1005 if (ret) {
1006 dev_err(dev, "quad mode not supported\n");
1007 return ret;
1008 }
1009 nor->flash_read = SPI_NOR_QUAD;
1010 } else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
1011 nor->flash_read = SPI_NOR_DUAL;
1012 }
1013
1014 /* Default commands */
1015 switch (nor->flash_read) {
1016 case SPI_NOR_QUAD:
1017 nor->read_opcode = OPCODE_QUAD_READ;
1018 break;
1019 case SPI_NOR_DUAL:
1020 nor->read_opcode = OPCODE_DUAL_READ;
1021 break;
1022 case SPI_NOR_FAST:
1023 nor->read_opcode = OPCODE_FAST_READ;
1024 break;
1025 case SPI_NOR_NORMAL:
1026 nor->read_opcode = OPCODE_NORM_READ;
1027 break;
1028 default:
1029 dev_err(dev, "No Read opcode defined\n");
1030 return -EINVAL;
1031 }
1032
1033 nor->program_opcode = OPCODE_PP;
1034
1035 if (info->addr_width)
1036 nor->addr_width = info->addr_width;
1037 else if (mtd->size > 0x1000000) {
1038 /* enable 4-byte addressing if the device exceeds 16MiB */
1039 nor->addr_width = 4;
1040 if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) {
1041 /* Dedicated 4-byte command set */
1042 switch (nor->flash_read) {
1043 case SPI_NOR_QUAD:
1044 nor->read_opcode = OPCODE_QUAD_READ_4B;
1045 break;
1046 case SPI_NOR_DUAL:
1047 nor->read_opcode = OPCODE_DUAL_READ_4B;
1048 break;
1049 case SPI_NOR_FAST:
1050 nor->read_opcode = OPCODE_FAST_READ_4B;
1051 break;
1052 case SPI_NOR_NORMAL:
1053 nor->read_opcode = OPCODE_NORM_READ_4B;
1054 break;
1055 }
1056 nor->program_opcode = OPCODE_PP_4B;
1057 /* No small sector erase for 4-byte command set */
1058 nor->erase_opcode = OPCODE_SE_4B;
1059 mtd->erasesize = info->sector_size;
1060 } else
1061 set_4byte(nor, info->jedec_id, 1);
1062 } else {
1063 nor->addr_width = 3;
1064 }
1065
1066 nor->read_dummy = spi_nor_read_dummy_cycles(nor);
1067
1068 dev_info(dev, "%s (%lld Kbytes)\n", id->name,
1069 (long long)mtd->size >> 10);
1070
1071 dev_dbg(dev,
1072 "mtd .name = %s, .size = 0x%llx (%lldMiB), "
1073 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
1074 mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
1075 mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
1076
1077 if (mtd->numeraseregions)
1078 for (i = 0; i < mtd->numeraseregions; i++)
1079 dev_dbg(dev,
1080 "mtd.eraseregions[%d] = { .offset = 0x%llx, "
1081 ".erasesize = 0x%.8x (%uKiB), "
1082 ".numblocks = %d }\n",
1083 i, (long long)mtd->eraseregions[i].offset,
1084 mtd->eraseregions[i].erasesize,
1085 mtd->eraseregions[i].erasesize / 1024,
1086 mtd->eraseregions[i].numblocks);
1087 return 0;
1088}
1089EXPORT_SYMBOL_GPL(spi_nor_scan);
1090
1091const struct spi_device_id *spi_nor_match_id(char *name)
1092{
1093 const struct spi_device_id *id = spi_nor_ids;
1094
1095 while (id->name[0]) {
1096 if (!strcmp(name, id->name))
1097 return id;
1098 id++;
1099 }
1100 return NULL;
1101}
1102EXPORT_SYMBOL_GPL(spi_nor_match_id);
1103
1104MODULE_LICENSE("GPL");
1105MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
1106MODULE_AUTHOR("Mike Lavender");
1107MODULE_DESCRIPTION("framework for SPI NOR");