Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1 | /* |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 2 | * MTD SPI driver for ST M25Pxx (and similar) serial flash chips |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 3 | * |
| 4 | * Author: Mike Lavender, mike@steroidmicros.com |
| 5 | * |
| 6 | * Copyright (c) 2005, Intec Automation Inc. |
| 7 | * |
| 8 | * Some parts are based on lart.c by Abraham Van Der Merwe |
| 9 | * |
| 10 | * Cleaned up and generalized based on mtd_dataflash.c |
| 11 | * |
| 12 | * This code is free software; you can redistribute it and/or modify |
| 13 | * it under the terms of the GNU General Public License version 2 as |
| 14 | * published by the Free Software Foundation. |
| 15 | * |
| 16 | */ |
| 17 | |
| 18 | #include <linux/init.h> |
Anton Vorontsov | 9d2c4f3 | 2010-06-22 20:57:42 +0400 | [diff] [blame] | 19 | #include <linux/err.h> |
| 20 | #include <linux/errno.h> |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 21 | #include <linux/module.h> |
| 22 | #include <linux/device.h> |
| 23 | #include <linux/interrupt.h> |
David Brownell | 7d5230e | 2007-06-24 15:09:13 -0700 | [diff] [blame] | 24 | #include <linux/mutex.h> |
Artem Bityutskiy | d85316a | 2008-12-18 14:10:05 +0200 | [diff] [blame] | 25 | #include <linux/math64.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 26 | #include <linux/slab.h> |
Alexey Dobriyan | d43c36d | 2009-10-07 17:09:06 +0400 | [diff] [blame] | 27 | #include <linux/sched.h> |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 28 | #include <linux/mod_devicetable.h> |
David Brownell | 7d5230e | 2007-06-24 15:09:13 -0700 | [diff] [blame] | 29 | |
Kevin Cernekee | aa08465 | 2011-05-08 10:48:00 -0700 | [diff] [blame] | 30 | #include <linux/mtd/cfi.h> |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 31 | #include <linux/mtd/mtd.h> |
| 32 | #include <linux/mtd/partitions.h> |
Shaohui Xie | 5f94913 | 2011-10-14 15:49:00 +0800 | [diff] [blame] | 33 | #include <linux/of_platform.h> |
David Brownell | 7d5230e | 2007-06-24 15:09:13 -0700 | [diff] [blame] | 34 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 35 | #include <linux/spi/spi.h> |
| 36 | #include <linux/spi/flash.h> |
| 37 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 38 | /* Flash opcodes. */ |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 39 | #define OPCODE_WREN 0x06 /* Write enable */ |
| 40 | #define OPCODE_RDSR 0x05 /* Read status register */ |
Michael Hennerich | 7228982 | 2008-07-03 23:54:42 -0700 | [diff] [blame] | 41 | #define OPCODE_WRSR 0x01 /* Write status register 1 byte */ |
Bryan Wu | 2230b76 | 2008-04-25 12:07:32 +0800 | [diff] [blame] | 42 | #define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */ |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 43 | #define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */ |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 44 | #define OPCODE_QUAD_READ 0x6b /* Read data bytes */ |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 45 | #define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */ |
Chen Gong | 7854643 | 2008-11-26 10:23:57 +0000 | [diff] [blame] | 46 | #define OPCODE_BE_4K 0x20 /* Erase 4KiB block */ |
Michel Stempin | 6c3b889 | 2013-07-15 12:13:56 +0200 | [diff] [blame] | 47 | #define OPCODE_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ |
David Woodhouse | 02d087d | 2007-06-28 22:38:38 +0100 | [diff] [blame] | 48 | #define OPCODE_BE_32K 0x52 /* Erase 32KiB block */ |
Chen Gong | 7854643 | 2008-11-26 10:23:57 +0000 | [diff] [blame] | 49 | #define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */ |
David Woodhouse | 02d087d | 2007-06-28 22:38:38 +0100 | [diff] [blame] | 50 | #define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */ |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 51 | #define OPCODE_RDID 0x9f /* Read JEDEC ID */ |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 52 | #define OPCODE_RDCR 0x35 /* Read configuration register */ |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 53 | |
Brian Norris | 87c9511 | 2013-04-11 01:34:57 -0700 | [diff] [blame] | 54 | /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ |
| 55 | #define OPCODE_NORM_READ_4B 0x13 /* Read data bytes (low frequency) */ |
| 56 | #define OPCODE_FAST_READ_4B 0x0c /* Read data bytes (high frequency) */ |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 57 | #define OPCODE_QUAD_READ_4B 0x6c /* Read data bytes */ |
Brian Norris | 87c9511 | 2013-04-11 01:34:57 -0700 | [diff] [blame] | 58 | #define OPCODE_PP_4B 0x12 /* Page program (up to 256 bytes) */ |
| 59 | #define OPCODE_SE_4B 0xdc /* Sector erase (usually 64KiB) */ |
| 60 | |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 61 | /* Used for SST flashes only. */ |
| 62 | #define OPCODE_BP 0x02 /* Byte program */ |
| 63 | #define OPCODE_WRDI 0x04 /* Write disable */ |
| 64 | #define OPCODE_AAI_WP 0xad /* Auto address increment word program */ |
| 65 | |
Brian Norris | caddab0 | 2013-04-11 01:34:58 -0700 | [diff] [blame] | 66 | /* Used for Macronix and Winbond flashes. */ |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 67 | #define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */ |
| 68 | #define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */ |
| 69 | |
Kevin Cernekee | baa9ae3 | 2011-05-08 10:48:01 -0700 | [diff] [blame] | 70 | /* Used for Spansion flashes only. */ |
| 71 | #define OPCODE_BRWR 0x17 /* Bank register write */ |
| 72 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 73 | /* Status Register bits. */ |
| 74 | #define SR_WIP 1 /* Write in progress */ |
| 75 | #define SR_WEL 2 /* Write enable latch */ |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 76 | /* meaning of other SR_* bits may differ between vendors */ |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 77 | #define SR_BP0 4 /* Block protect 0 */ |
| 78 | #define SR_BP1 8 /* Block protect 1 */ |
| 79 | #define SR_BP2 0x10 /* Block protect 2 */ |
| 80 | #define SR_SRWD 0x80 /* SR write protect */ |
| 81 | |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 82 | #define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */ |
| 83 | |
| 84 | /* Configuration Register bits. */ |
| 85 | #define CR_QUAD_EN_SPAN 0x2 /* Spansion Quad I/O */ |
| 86 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 87 | /* Define max times to check status register before we give up. */ |
Steven A. Falco | 89bb871 | 2009-06-26 12:42:47 -0400 | [diff] [blame] | 88 | #define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */ |
Brian Norris | 778d226 | 2013-07-24 18:32:07 -0700 | [diff] [blame] | 89 | #define MAX_CMD_SIZE 6 |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 90 | |
Kevin Cernekee | aa08465 | 2011-05-08 10:48:00 -0700 | [diff] [blame] | 91 | #define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16) |
| 92 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 93 | /****************************************************************************/ |
| 94 | |
/*
 * Read command flavor in use; selects how many dummy bytes follow the
 * address (see m25p80_dummy_cycles_read()).
 */
enum read_type {
	M25P80_NORMAL = 0,	/* no dummy bytes after the address */
	M25P80_FAST,		/* one dummy byte after the address */
	M25P80_QUAD,		/* one dummy byte after the address */
};
| 100 | |
/*
 * Per-chip driver state.  The MTD core hands back the embedded 'mtd'
 * member; mtd_to_m25p() recovers the containing structure.  All flash
 * access is serialized with 'lock'.
 */
struct m25p {
	struct spi_device *spi;		/* underlying SPI slave */
	struct mutex lock;		/* serializes erase/read/write paths */
	struct mtd_info mtd;		/* MTD interface registered for this chip */
	u16 page_size;			/* program-page size used by m25p80_write() */
	u16 addr_width;			/* address bytes per command; see m25p_addr2cmd()/set_4byte() */
	u8 erase_opcode;		/* opcode issued by erase_sector() */
	u8 read_opcode;			/* opcode issued by m25p80_read() */
	u8 program_opcode;		/* opcode issued by m25p80_write() */
	u8 *command;			/* scratch buffer for opcode+address bytes
					 * (presumably MAX_CMD_SIZE long — allocation
					 * not visible here, confirm at probe) */
	enum read_type flash_read;	/* selected read mode */
};
| 113 | |
| 114 | static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd) |
| 115 | { |
| 116 | return container_of(mtd, struct m25p, mtd); |
| 117 | } |
| 118 | |
| 119 | /****************************************************************************/ |
| 120 | |
| 121 | /* |
| 122 | * Internal helper functions |
| 123 | */ |
| 124 | |
| 125 | /* |
| 126 | * Read the status register, returning its value in the location |
| 127 | * Return the status register value. |
| 128 | * Returns negative if error occurred. |
| 129 | */ |
| 130 | static int read_sr(struct m25p *flash) |
| 131 | { |
| 132 | ssize_t retval; |
| 133 | u8 code = OPCODE_RDSR; |
| 134 | u8 val; |
| 135 | |
| 136 | retval = spi_write_then_read(flash->spi, &code, 1, &val, 1); |
| 137 | |
| 138 | if (retval < 0) { |
| 139 | dev_err(&flash->spi->dev, "error %d reading SR\n", |
| 140 | (int) retval); |
| 141 | return retval; |
| 142 | } |
| 143 | |
| 144 | return val; |
| 145 | } |
| 146 | |
Michael Hennerich | 7228982 | 2008-07-03 23:54:42 -0700 | [diff] [blame] | 147 | /* |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 148 | * Read configuration register, returning its value in the |
| 149 | * location. Return the configuration register value. |
 * Returns negative if error occurred.
| 151 | */ |
| 152 | static int read_cr(struct m25p *flash) |
| 153 | { |
| 154 | u8 code = OPCODE_RDCR; |
| 155 | int ret; |
| 156 | u8 val; |
| 157 | |
| 158 | ret = spi_write_then_read(flash->spi, &code, 1, &val, 1); |
| 159 | if (ret < 0) { |
| 160 | dev_err(&flash->spi->dev, "error %d reading CR\n", ret); |
| 161 | return ret; |
| 162 | } |
| 163 | |
| 164 | return val; |
| 165 | } |
| 166 | |
| 167 | /* |
Michael Hennerich | 7228982 | 2008-07-03 23:54:42 -0700 | [diff] [blame] | 168 | * Write status register 1 byte |
| 169 | * Returns negative if error occurred. |
| 170 | */ |
| 171 | static int write_sr(struct m25p *flash, u8 val) |
| 172 | { |
| 173 | flash->command[0] = OPCODE_WRSR; |
| 174 | flash->command[1] = val; |
| 175 | |
| 176 | return spi_write(flash->spi, flash->command, 2); |
| 177 | } |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 178 | |
| 179 | /* |
| 180 | * Set write enable latch with Write Enable command. |
| 181 | * Returns negative if error occurred. |
| 182 | */ |
| 183 | static inline int write_enable(struct m25p *flash) |
| 184 | { |
| 185 | u8 code = OPCODE_WREN; |
| 186 | |
David Woodhouse | 8a1a627 | 2008-10-20 09:26:16 +0100 | [diff] [blame] | 187 | return spi_write_then_read(flash->spi, &code, 1, NULL, 0); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 188 | } |
| 189 | |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 190 | /* |
 * Send write disable instruction to the chip.
| 192 | */ |
| 193 | static inline int write_disable(struct m25p *flash) |
| 194 | { |
| 195 | u8 code = OPCODE_WRDI; |
| 196 | |
| 197 | return spi_write_then_read(flash->spi, &code, 1, NULL, 0); |
| 198 | } |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 199 | |
| 200 | /* |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 201 | * Enable/disable 4-byte addressing mode. |
| 202 | */ |
static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
{
	int status;
	bool need_wren = false;

	switch (JEDEC_MFR(jedec_id)) {
	case CFI_MFR_ST: /* Micron, actually */
		/* Some Micron need WREN command; all will accept it */
		need_wren = true;
		/* fallthrough -- Micron shares the EN4B/EX4B opcodes below */
	case CFI_MFR_MACRONIX:
	case 0xEF /* winbond */:
		if (need_wren)
			write_enable(flash);

		/* Single-opcode mode switch: EN4B to enter, EX4B to exit. */
		flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
		status = spi_write(flash->spi, flash->command, 1);

		if (need_wren)
			write_disable(flash);

		return status;
	default:
		/* Spansion style: bank register write, 'enable' in the top bit */
		flash->command[0] = OPCODE_BRWR;
		flash->command[1] = enable << 7;
		return spi_write(flash->spi, flash->command, 2);
	}
}
| 231 | |
| 232 | /* |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 233 | * Service routine to read status register until ready, or timeout occurs. |
| 234 | * Returns non-zero if error. |
| 235 | */ |
| 236 | static int wait_till_ready(struct m25p *flash) |
| 237 | { |
Peter Horton | cd1a6de | 2009-05-08 13:51:53 +0100 | [diff] [blame] | 238 | unsigned long deadline; |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 239 | int sr; |
| 240 | |
Peter Horton | cd1a6de | 2009-05-08 13:51:53 +0100 | [diff] [blame] | 241 | deadline = jiffies + MAX_READY_WAIT_JIFFIES; |
| 242 | |
| 243 | do { |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 244 | if ((sr = read_sr(flash)) < 0) |
| 245 | break; |
| 246 | else if (!(sr & SR_WIP)) |
| 247 | return 0; |
| 248 | |
Peter Horton | cd1a6de | 2009-05-08 13:51:53 +0100 | [diff] [blame] | 249 | cond_resched(); |
| 250 | |
| 251 | } while (!time_after_eq(jiffies, deadline)); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 252 | |
| 253 | return 1; |
| 254 | } |
| 255 | |
Chen Gong | faff375 | 2008-08-11 16:59:13 +0800 | [diff] [blame] | 256 | /* |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 257 | * Write status Register and configuration register with 2 bytes |
| 258 | * The first byte will be written to the status register, while the |
| 259 | * second byte will be written to the configuration register. |
 * Return negative if error occurred.
| 261 | */ |
| 262 | static int write_sr_cr(struct m25p *flash, u16 val) |
| 263 | { |
| 264 | flash->command[0] = OPCODE_WRSR; |
| 265 | flash->command[1] = val & 0xff; |
| 266 | flash->command[2] = (val >> 8); |
| 267 | |
| 268 | return spi_write(flash->spi, flash->command, 3); |
| 269 | } |
| 270 | |
| 271 | static int macronix_quad_enable(struct m25p *flash) |
| 272 | { |
| 273 | int ret, val; |
| 274 | u8 cmd[2]; |
| 275 | cmd[0] = OPCODE_WRSR; |
| 276 | |
| 277 | val = read_sr(flash); |
| 278 | cmd[1] = val | SR_QUAD_EN_MX; |
| 279 | write_enable(flash); |
| 280 | |
| 281 | spi_write(flash->spi, &cmd, 2); |
| 282 | |
| 283 | if (wait_till_ready(flash)) |
| 284 | return 1; |
| 285 | |
| 286 | ret = read_sr(flash); |
| 287 | if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) { |
| 288 | dev_err(&flash->spi->dev, "Macronix Quad bit not set\n"); |
| 289 | return -EINVAL; |
| 290 | } |
| 291 | |
| 292 | return 0; |
| 293 | } |
| 294 | |
| 295 | static int spansion_quad_enable(struct m25p *flash) |
| 296 | { |
| 297 | int ret; |
| 298 | int quad_en = CR_QUAD_EN_SPAN << 8; |
| 299 | |
| 300 | write_enable(flash); |
| 301 | |
| 302 | ret = write_sr_cr(flash, quad_en); |
| 303 | if (ret < 0) { |
| 304 | dev_err(&flash->spi->dev, |
| 305 | "error while writing configuration register\n"); |
| 306 | return -EINVAL; |
| 307 | } |
| 308 | |
| 309 | /* read back and check it */ |
| 310 | ret = read_cr(flash); |
| 311 | if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) { |
| 312 | dev_err(&flash->spi->dev, "Spansion Quad bit not set\n"); |
| 313 | return -EINVAL; |
| 314 | } |
| 315 | |
| 316 | return 0; |
| 317 | } |
| 318 | |
| 319 | static int set_quad_mode(struct m25p *flash, u32 jedec_id) |
| 320 | { |
| 321 | int status; |
| 322 | |
| 323 | switch (JEDEC_MFR(jedec_id)) { |
| 324 | case CFI_MFR_MACRONIX: |
| 325 | status = macronix_quad_enable(flash); |
| 326 | if (status) { |
| 327 | dev_err(&flash->spi->dev, |
| 328 | "Macronix quad-read not enabled\n"); |
| 329 | return -EINVAL; |
| 330 | } |
| 331 | return status; |
| 332 | default: |
| 333 | status = spansion_quad_enable(flash); |
| 334 | if (status) { |
| 335 | dev_err(&flash->spi->dev, |
| 336 | "Spansion quad-read not enabled\n"); |
| 337 | return -EINVAL; |
| 338 | } |
| 339 | return status; |
| 340 | } |
| 341 | } |
| 342 | |
| 343 | /* |
Chen Gong | faff375 | 2008-08-11 16:59:13 +0800 | [diff] [blame] | 344 | * Erase the whole flash memory |
| 345 | * |
| 346 | * Returns 0 if successful, non-zero otherwise. |
| 347 | */ |
Chen Gong | 7854643 | 2008-11-26 10:23:57 +0000 | [diff] [blame] | 348 | static int erase_chip(struct m25p *flash) |
Chen Gong | faff375 | 2008-08-11 16:59:13 +0800 | [diff] [blame] | 349 | { |
Brian Norris | 0a32a10 | 2011-07-19 10:06:10 -0700 | [diff] [blame] | 350 | pr_debug("%s: %s %lldKiB\n", dev_name(&flash->spi->dev), __func__, |
| 351 | (long long)(flash->mtd.size >> 10)); |
Chen Gong | faff375 | 2008-08-11 16:59:13 +0800 | [diff] [blame] | 352 | |
| 353 | /* Wait until finished previous write command. */ |
| 354 | if (wait_till_ready(flash)) |
| 355 | return 1; |
| 356 | |
| 357 | /* Send write enable, then erase commands. */ |
| 358 | write_enable(flash); |
| 359 | |
| 360 | /* Set up command buffer. */ |
Chen Gong | 7854643 | 2008-11-26 10:23:57 +0000 | [diff] [blame] | 361 | flash->command[0] = OPCODE_CHIP_ERASE; |
Chen Gong | faff375 | 2008-08-11 16:59:13 +0800 | [diff] [blame] | 362 | |
| 363 | spi_write(flash->spi, flash->command, 1); |
| 364 | |
| 365 | return 0; |
| 366 | } |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 367 | |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 368 | static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd) |
| 369 | { |
| 370 | /* opcode is in cmd[0] */ |
| 371 | cmd[1] = addr >> (flash->addr_width * 8 - 8); |
| 372 | cmd[2] = addr >> (flash->addr_width * 8 - 16); |
| 373 | cmd[3] = addr >> (flash->addr_width * 8 - 24); |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 374 | cmd[4] = addr >> (flash->addr_width * 8 - 32); |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 375 | } |
| 376 | |
| 377 | static int m25p_cmdsz(struct m25p *flash) |
| 378 | { |
| 379 | return 1 + flash->addr_width; |
| 380 | } |
| 381 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 382 | /* |
| 383 | * Erase one sector of flash memory at offset ``offset'' which is any |
| 384 | * address within the sector which should be erased. |
| 385 | * |
| 386 | * Returns 0 if successful, non-zero otherwise. |
| 387 | */ |
| 388 | static int erase_sector(struct m25p *flash, u32 offset) |
| 389 | { |
Brian Norris | 0a32a10 | 2011-07-19 10:06:10 -0700 | [diff] [blame] | 390 | pr_debug("%s: %s %dKiB at 0x%08x\n", dev_name(&flash->spi->dev), |
| 391 | __func__, flash->mtd.erasesize / 1024, offset); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 392 | |
| 393 | /* Wait until finished previous write command. */ |
| 394 | if (wait_till_ready(flash)) |
| 395 | return 1; |
| 396 | |
| 397 | /* Send write enable, then erase commands. */ |
| 398 | write_enable(flash); |
| 399 | |
| 400 | /* Set up command buffer. */ |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 401 | flash->command[0] = flash->erase_opcode; |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 402 | m25p_addr2cmd(flash, offset, flash->command); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 403 | |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 404 | spi_write(flash->spi, flash->command, m25p_cmdsz(flash)); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 405 | |
| 406 | return 0; |
| 407 | } |
| 408 | |
| 409 | /****************************************************************************/ |
| 410 | |
| 411 | /* |
| 412 | * MTD implementation |
| 413 | */ |
| 414 | |
| 415 | /* |
| 416 | * Erase an address range on the flash chip. The address range may extend |
 * one or more erase sectors.  Return an error if there is a problem erasing.
| 418 | */ |
static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	u32 addr,len;
	uint32_t rem;

	pr_debug("%s: %s at 0x%llx, len %lld\n", dev_name(&flash->spi->dev),
			__func__, (long long)instr->addr,
			(long long)instr->len);

	/* The erase length must be an exact multiple of the sector size. */
	div_u64_rem(instr->len, mtd->erasesize, &rem);
	if (rem)
		return -EINVAL;

	addr = instr->addr;
	len = instr->len;

	mutex_lock(&flash->lock);

	/* whole-chip erase? */
	if (len == flash->mtd.size) {
		/* On failure: mark the request failed and drop the lock
		 * before returning -- order matters here. */
		if (erase_chip(flash)) {
			instr->state = MTD_ERASE_FAILED;
			mutex_unlock(&flash->lock);
			return -EIO;
		}

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using OPCODE_SE instead of OPCODE_BE_4K. We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else {
		while (len) {
			if (erase_sector(flash, addr)) {
				instr->state = MTD_ERASE_FAILED;
				mutex_unlock(&flash->lock);
				return -EIO;
			}

			addr += mtd->erasesize;
			len -= mtd->erasesize;
		}
	}

	mutex_unlock(&flash->lock);

	/* Success: report completion to the MTD layer. */
	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
| 472 | |
| 473 | /* |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 474 | * Dummy Cycle calculation for different type of read. |
| 475 | * It can be used to support more commands with |
| 476 | * different dummy cycle requirements. |
| 477 | */ |
| 478 | static inline int m25p80_dummy_cycles_read(struct m25p *flash) |
| 479 | { |
| 480 | switch (flash->flash_read) { |
| 481 | case M25P80_FAST: |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 482 | case M25P80_QUAD: |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 483 | return 1; |
| 484 | case M25P80_NORMAL: |
| 485 | return 0; |
| 486 | default: |
| 487 | dev_err(&flash->spi->dev, "No valid read type supported\n"); |
| 488 | return -1; |
| 489 | } |
| 490 | } |
| 491 | |
| 492 | /* |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 493 | * Read an address range from the flash chip. The address range |
| 494 | * may be any size provided it is within the physical boundaries. |
| 495 | */ |
static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
	size_t *retlen, u_char *buf)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	struct spi_transfer t[2];
	struct spi_message m;
	uint8_t opcode;
	int dummy;

	pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
			__func__, (u32)from, len);

	spi_message_init(&m);
	memset(t, 0, (sizeof t));

	/* Number of dummy bytes depends on the selected read mode. */
	dummy = m25p80_dummy_cycles_read(flash);
	if (dummy < 0) {
		dev_err(&flash->spi->dev, "No valid read command supported\n");
		return -EINVAL;
	}

	/* Transfer 0: opcode + address (+ dummy bytes) out of the scratch
	 * buffer; the dummy bytes are whatever the buffer happens to hold,
	 * which the chip ignores. */
	t[0].tx_buf = flash->command;
	t[0].len = m25p_cmdsz(flash) + dummy;
	spi_message_add_tail(&t[0], &m);

	/* Transfer 1: the data clocked back from the flash. */
	t[1].rx_buf = buf;
	t[1].len = len;
	spi_message_add_tail(&t[1], &m);

	mutex_lock(&flash->lock);

	/* Wait till previous write/erase is done. */
	if (wait_till_ready(flash)) {
		/* REVISIT status return?? */
		mutex_unlock(&flash->lock);
		return 1;
	}

	/* Set up the write data buffer. */
	opcode = flash->read_opcode;
	flash->command[0] = opcode;
	m25p_addr2cmd(flash, from, flash->command);

	spi_sync(flash->spi, &m);

	/* Bytes read = total transferred minus command and dummy bytes. */
	*retlen = m.actual_length - m25p_cmdsz(flash) - dummy;

	mutex_unlock(&flash->lock);

	return 0;
}
| 547 | |
| 548 | /* |
| 549 | * Write an address range to the flash chip. Data must be written in |
| 550 | * FLASH_PAGESIZE chunks. The address range may be any size provided |
| 551 | * it is within the physical boundaries. |
| 552 | */ |
static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	u32 page_offset, page_size;
	struct spi_transfer t[2];
	struct spi_message m;

	pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
			__func__, (u32)to, len);

	spi_message_init(&m);
	memset(t, 0, (sizeof t));

	/* Transfer 0: opcode + address; transfer 1: payload (length set
	 * per chunk below). */
	t[0].tx_buf = flash->command;
	t[0].len = m25p_cmdsz(flash);
	spi_message_add_tail(&t[0], &m);

	t[1].tx_buf = buf;
	spi_message_add_tail(&t[1], &m);

	mutex_lock(&flash->lock);

	/* Wait until finished previous write command. */
	if (wait_till_ready(flash)) {
		mutex_unlock(&flash->lock);
		return 1;
	}

	write_enable(flash);

	/* Set up the opcode in the write buffer. */
	flash->command[0] = flash->program_opcode;
	m25p_addr2cmd(flash, to, flash->command);

	/* Offset within the first program page (page_size is a power of 2). */
	page_offset = to & (flash->page_size - 1);

	/* do all the bytes fit onto one page? */
	if (page_offset + len <= flash->page_size) {
		t[1].len = len;

		spi_sync(flash->spi, &m);

		*retlen = m.actual_length - m25p_cmdsz(flash);
	} else {
		u32 i;

		/* the size of data remaining on the first page */
		page_size = flash->page_size - page_offset;

		t[1].len = page_size;
		spi_sync(flash->spi, &m);

		*retlen = m.actual_length - m25p_cmdsz(flash);

		/* write everything in flash->page_size chunks */
		for (i = page_size; i < len; i += page_size) {
			page_size = len - i;
			if (page_size > flash->page_size)
				page_size = flash->page_size;

			/* write the next page to flash */
			m25p_addr2cmd(flash, to + i, flash->command);

			t[1].tx_buf = buf + i;
			t[1].len = page_size;

			/* NOTE(review): the return value is ignored here,
			 * unlike the check at the top of the function --
			 * a ready-wait timeout would be silently ignored.
			 * Confirm whether that is intentional. */
			wait_till_ready(flash);

			write_enable(flash);

			spi_sync(flash->spi, &m);

			*retlen += m.actual_length - m25p_cmdsz(flash);
		}
	}

	mutex_unlock(&flash->lock);

	return 0;
}
| 634 | |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 635 | static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, |
| 636 | size_t *retlen, const u_char *buf) |
| 637 | { |
| 638 | struct m25p *flash = mtd_to_m25p(mtd); |
| 639 | struct spi_transfer t[2]; |
| 640 | struct spi_message m; |
| 641 | size_t actual; |
| 642 | int cmd_sz, ret; |
| 643 | |
Brian Norris | 0a32a10 | 2011-07-19 10:06:10 -0700 | [diff] [blame] | 644 | pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), |
| 645 | __func__, (u32)to, len); |
Nicolas Ferre | dcf1246 | 2010-12-15 12:59:32 +0100 | [diff] [blame] | 646 | |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 647 | spi_message_init(&m); |
| 648 | memset(t, 0, (sizeof t)); |
| 649 | |
| 650 | t[0].tx_buf = flash->command; |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 651 | t[0].len = m25p_cmdsz(flash); |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 652 | spi_message_add_tail(&t[0], &m); |
| 653 | |
| 654 | t[1].tx_buf = buf; |
| 655 | spi_message_add_tail(&t[1], &m); |
| 656 | |
| 657 | mutex_lock(&flash->lock); |
| 658 | |
| 659 | /* Wait until finished previous write command. */ |
| 660 | ret = wait_till_ready(flash); |
| 661 | if (ret) |
| 662 | goto time_out; |
| 663 | |
| 664 | write_enable(flash); |
| 665 | |
| 666 | actual = to % 2; |
| 667 | /* Start write from odd address. */ |
| 668 | if (actual) { |
| 669 | flash->command[0] = OPCODE_BP; |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 670 | m25p_addr2cmd(flash, to, flash->command); |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 671 | |
| 672 | /* write one byte. */ |
| 673 | t[1].len = 1; |
| 674 | spi_sync(flash->spi, &m); |
| 675 | ret = wait_till_ready(flash); |
| 676 | if (ret) |
| 677 | goto time_out; |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 678 | *retlen += m.actual_length - m25p_cmdsz(flash); |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 679 | } |
| 680 | to += actual; |
| 681 | |
| 682 | flash->command[0] = OPCODE_AAI_WP; |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 683 | m25p_addr2cmd(flash, to, flash->command); |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 684 | |
| 685 | /* Write out most of the data here. */ |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 686 | cmd_sz = m25p_cmdsz(flash); |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 687 | for (; actual < len - 1; actual += 2) { |
| 688 | t[0].len = cmd_sz; |
| 689 | /* write two bytes. */ |
| 690 | t[1].len = 2; |
| 691 | t[1].tx_buf = buf + actual; |
| 692 | |
| 693 | spi_sync(flash->spi, &m); |
| 694 | ret = wait_till_ready(flash); |
| 695 | if (ret) |
| 696 | goto time_out; |
| 697 | *retlen += m.actual_length - cmd_sz; |
| 698 | cmd_sz = 1; |
| 699 | to += 2; |
| 700 | } |
| 701 | write_disable(flash); |
| 702 | ret = wait_till_ready(flash); |
| 703 | if (ret) |
| 704 | goto time_out; |
| 705 | |
| 706 | /* Write out trailing byte if it exists. */ |
| 707 | if (actual != len) { |
| 708 | write_enable(flash); |
| 709 | flash->command[0] = OPCODE_BP; |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 710 | m25p_addr2cmd(flash, to, flash->command); |
| 711 | t[0].len = m25p_cmdsz(flash); |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 712 | t[1].len = 1; |
| 713 | t[1].tx_buf = buf + actual; |
| 714 | |
| 715 | spi_sync(flash->spi, &m); |
| 716 | ret = wait_till_ready(flash); |
| 717 | if (ret) |
| 718 | goto time_out; |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 719 | *retlen += m.actual_length - m25p_cmdsz(flash); |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 720 | write_disable(flash); |
| 721 | } |
| 722 | |
| 723 | time_out: |
| 724 | mutex_unlock(&flash->lock); |
| 725 | return ret; |
| 726 | } |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 727 | |
Austin Boyle | 972e1b7 | 2013-01-04 13:02:28 +1300 | [diff] [blame] | 728 | static int m25p80_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
| 729 | { |
| 730 | struct m25p *flash = mtd_to_m25p(mtd); |
| 731 | uint32_t offset = ofs; |
| 732 | uint8_t status_old, status_new; |
| 733 | int res = 0; |
| 734 | |
| 735 | mutex_lock(&flash->lock); |
| 736 | /* Wait until finished previous command */ |
| 737 | if (wait_till_ready(flash)) { |
| 738 | res = 1; |
| 739 | goto err; |
| 740 | } |
| 741 | |
| 742 | status_old = read_sr(flash); |
| 743 | |
| 744 | if (offset < flash->mtd.size-(flash->mtd.size/2)) |
| 745 | status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0; |
| 746 | else if (offset < flash->mtd.size-(flash->mtd.size/4)) |
| 747 | status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1; |
| 748 | else if (offset < flash->mtd.size-(flash->mtd.size/8)) |
| 749 | status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0; |
| 750 | else if (offset < flash->mtd.size-(flash->mtd.size/16)) |
| 751 | status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2; |
| 752 | else if (offset < flash->mtd.size-(flash->mtd.size/32)) |
| 753 | status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0; |
| 754 | else if (offset < flash->mtd.size-(flash->mtd.size/64)) |
| 755 | status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1; |
| 756 | else |
| 757 | status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0; |
| 758 | |
| 759 | /* Only modify protection if it will not unlock other areas */ |
| 760 | if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) > |
| 761 | (status_old&(SR_BP2|SR_BP1|SR_BP0))) { |
| 762 | write_enable(flash); |
| 763 | if (write_sr(flash, status_new) < 0) { |
| 764 | res = 1; |
| 765 | goto err; |
| 766 | } |
| 767 | } |
| 768 | |
| 769 | err: mutex_unlock(&flash->lock); |
| 770 | return res; |
| 771 | } |
| 772 | |
| 773 | static int m25p80_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
| 774 | { |
| 775 | struct m25p *flash = mtd_to_m25p(mtd); |
| 776 | uint32_t offset = ofs; |
| 777 | uint8_t status_old, status_new; |
| 778 | int res = 0; |
| 779 | |
| 780 | mutex_lock(&flash->lock); |
| 781 | /* Wait until finished previous command */ |
| 782 | if (wait_till_ready(flash)) { |
| 783 | res = 1; |
| 784 | goto err; |
| 785 | } |
| 786 | |
| 787 | status_old = read_sr(flash); |
| 788 | |
| 789 | if (offset+len > flash->mtd.size-(flash->mtd.size/64)) |
| 790 | status_new = status_old & ~(SR_BP2|SR_BP1|SR_BP0); |
| 791 | else if (offset+len > flash->mtd.size-(flash->mtd.size/32)) |
| 792 | status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0; |
| 793 | else if (offset+len > flash->mtd.size-(flash->mtd.size/16)) |
| 794 | status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1; |
| 795 | else if (offset+len > flash->mtd.size-(flash->mtd.size/8)) |
| 796 | status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0; |
| 797 | else if (offset+len > flash->mtd.size-(flash->mtd.size/4)) |
| 798 | status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2; |
| 799 | else if (offset+len > flash->mtd.size-(flash->mtd.size/2)) |
| 800 | status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0; |
| 801 | else |
| 802 | status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1; |
| 803 | |
| 804 | /* Only modify protection if it will not lock other areas */ |
| 805 | if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) < |
| 806 | (status_old&(SR_BP2|SR_BP1|SR_BP0))) { |
| 807 | write_enable(flash); |
| 808 | if (write_sr(flash, status_new) < 0) { |
| 809 | res = 1; |
| 810 | goto err; |
| 811 | } |
| 812 | } |
| 813 | |
| 814 | err: mutex_unlock(&flash->lock); |
| 815 | return res; |
| 816 | } |
| 817 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 818 | /****************************************************************************/ |
| 819 | |
| 820 | /* |
| 821 | * SPI device driver setup and teardown |
| 822 | */ |
| 823 | |
| 824 | struct flash_info { |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 825 | /* JEDEC id zero means "no ID" (most older chips); otherwise it has |
| 826 | * a high byte of zero plus three data bytes: the manufacturer id, |
| 827 | * then a two byte device id. |
| 828 | */ |
| 829 | u32 jedec_id; |
Chen Gong | d0e8c47 | 2008-08-11 16:59:15 +0800 | [diff] [blame] | 830 | u16 ext_id; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 831 | |
| 832 | /* The size listed here is what works with OPCODE_SE, which isn't |
| 833 | * necessarily called a "sector" by the vendor. |
| 834 | */ |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 835 | unsigned sector_size; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 836 | u16 n_sectors; |
| 837 | |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 838 | u16 page_size; |
| 839 | u16 addr_width; |
| 840 | |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 841 | u16 flags; |
| 842 | #define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */ |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 843 | #define M25P_NO_ERASE 0x02 /* No erase command needed */ |
Krzysztof Mazur | e534ee4 | 2013-02-22 15:51:05 +0100 | [diff] [blame] | 844 | #define SST_WRITE 0x04 /* use SST byte programming */ |
Sascha Hauer | 5814699 | 2013-08-20 09:54:40 +0200 | [diff] [blame] | 845 | #define M25P_NO_FR 0x08 /* Can't do fastread */ |
Michel Stempin | 6c3b889 | 2013-07-15 12:13:56 +0200 | [diff] [blame] | 846 | #define SECT_4K_PMC 0x10 /* OPCODE_BE_4K_PMC works uniformly */ |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 847 | #define M25P80_QUAD_READ 0x20 /* Flash supports Quad Read */ |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 848 | }; |
| 849 | |
/*
 * Build a flash_info initializer for a JEDEC-identified chip, stored in
 * spi_device_id.driver_data as a kernel_ulong_t.  page_size is fixed at
 * 256 bytes; addr_width is left zero.
 */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
	((kernel_ulong_t)&(struct flash_info) {				\
		.jedec_id = (_jedec_id),				\
		.ext_id = (_ext_id),					\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = 256,					\
		.flags = (_flags),					\
	})

/*
 * Build a flash_info initializer for Catalyst/Everspin-style parts that
 * have no JEDEC ID: page size and address width are given explicitly,
 * and jedec_id/ext_id stay zero ("no ID").
 */
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)	\
	((kernel_ulong_t)&(struct flash_info) {				\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = (_page_size),				\
		.addr_width = (_addr_width),				\
		.flags = (_flags),					\
	})
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 868 | |
| 869 | /* NOTE: double check command sets and memory organization when you add |
| 870 | * more flash chips. This current list focusses on newer chips, which |
| 871 | * have been converging on command sets which including JEDEC ID. |
| 872 | */ |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 873 | static const struct spi_device_id m25p_ids[] = { |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 874 | /* Atmel -- some are (confusingly) marketed as "DataFlash" */ |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 875 | { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) }, |
| 876 | { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 877 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 878 | { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) }, |
Mikhail Kshevetskiy | ada766e | 2011-09-23 19:36:18 +0400 | [diff] [blame] | 879 | { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) }, |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 880 | { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 881 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 882 | { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, |
| 883 | { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) }, |
| 884 | { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, |
Aleksandr Koltsoff | 8fffed8 | 2011-01-04 10:42:35 +0200 | [diff] [blame] | 885 | { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 886 | |
Chunhe Lan | a5b2d76 | 2012-06-19 10:55:08 +0800 | [diff] [blame] | 887 | { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) }, |
| 888 | |
Gabor Juhos | 37a23c20 | 2011-01-25 11:20:26 +0100 | [diff] [blame] | 889 | /* EON -- en25xxx */ |
Brian Norris | 6e5d9bd | 2013-08-09 19:41:13 -0700 | [diff] [blame] | 890 | { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, |
| 891 | { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, |
| 892 | { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, |
| 893 | { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, |
| 894 | { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, |
| 895 | { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) }, |
Gabor Juhos | 60845e7 | 2010-08-04 21:14:25 +0200 | [diff] [blame] | 896 | |
Flavio Silveira | e6db7c8 | 2013-09-03 20:25:54 -0300 | [diff] [blame] | 897 | /* ESMT */ |
| 898 | { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) }, |
| 899 | |
Marek Vasut | 5ca11ca | 2012-05-01 04:04:00 +0200 | [diff] [blame] | 900 | /* Everspin */ |
Brian Norris | 6e5d9bd | 2013-08-09 19:41:13 -0700 | [diff] [blame] | 901 | { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) }, |
| 902 | { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) }, |
Marek Vasut | 5ca11ca | 2012-05-01 04:04:00 +0200 | [diff] [blame] | 903 | |
Michel Stempin | 55bf75b | 2013-01-06 00:39:36 +0100 | [diff] [blame] | 904 | /* GigaDevice */ |
| 905 | { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) }, |
| 906 | { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) }, |
| 907 | |
Gabor Juhos | f80e521 | 2010-08-05 16:58:36 +0200 | [diff] [blame] | 908 | /* Intel/Numonyx -- xxxs33b */ |
| 909 | { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, |
| 910 | { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, |
| 911 | { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, |
| 912 | |
Lennert Buytenhek | ab1ff21 | 2009-05-20 13:07:11 +0200 | [diff] [blame] | 913 | /* Macronix */ |
John Crispin | bb08bc1 | 2012-04-30 19:30:45 +0200 | [diff] [blame] | 914 | { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) }, |
Simon Guinot | df0094d | 2009-12-05 15:28:00 +0100 | [diff] [blame] | 915 | { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) }, |
Martin Michlmayr | 6175f4a | 2010-06-07 19:31:01 +0100 | [diff] [blame] | 916 | { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) }, |
Gabor Juhos | 9c76b4e | 2011-03-25 08:48:52 +0100 | [diff] [blame] | 917 | { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) }, |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 918 | { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) }, |
Brian Norris | 5ff1482 | 2013-10-23 13:38:09 -0700 | [diff] [blame] | 919 | { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) }, |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 920 | { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) }, |
| 921 | { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, |
| 922 | { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 923 | { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, |
Kevin Cernekee | ac622f5 | 2010-10-30 21:11:04 -0700 | [diff] [blame] | 924 | { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 925 | { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, M25P80_QUAD_READ) }, |
Lennert Buytenhek | ab1ff21 | 2009-05-20 13:07:11 +0200 | [diff] [blame] | 926 | |
Vivien Didelot | 8da2868 | 2012-08-14 15:24:07 -0400 | [diff] [blame] | 927 | /* Micron */ |
Brian Norris | 6e5d9bd | 2013-08-09 19:41:13 -0700 | [diff] [blame] | 928 | { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) }, |
| 929 | { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) }, |
| 930 | { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, |
| 931 | { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) }, |
| 932 | { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) }, |
Vivien Didelot | 8da2868 | 2012-08-14 15:24:07 -0400 | [diff] [blame] | 933 | |
Michel Stempin | 6c3b889 | 2013-07-15 12:13:56 +0200 | [diff] [blame] | 934 | /* PMC */ |
Brian Norris | 6e5d9bd | 2013-08-09 19:41:13 -0700 | [diff] [blame] | 935 | { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, |
| 936 | { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) }, |
| 937 | { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) }, |
Michel Stempin | 6c3b889 | 2013-07-15 12:13:56 +0200 | [diff] [blame] | 938 | |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 939 | /* Spansion -- single (large) sector size only, at least |
| 940 | * for the chips listed here (without boot sectors). |
| 941 | */ |
Marek Vasut | b277f77 | 2012-09-04 05:31:36 +0200 | [diff] [blame] | 942 | { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) }, |
| 943 | { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) }, |
Kevin Cernekee | baa9ae3 | 2011-05-08 10:48:01 -0700 | [diff] [blame] | 944 | { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) }, |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 945 | { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, M25P80_QUAD_READ) }, |
Geert Uytterhoeven | d8d5d10 | 2014-01-21 13:59:16 +0100 | [diff] [blame^] | 946 | { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, M25P80_QUAD_READ) }, |
Kevin Cernekee | 3d2d2b6 | 2011-05-08 10:48:02 -0700 | [diff] [blame] | 947 | { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) }, |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 948 | { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, |
| 949 | { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, |
| 950 | { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) }, |
| 951 | { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) }, |
Marek Vasut | 8bb8b85 | 2012-07-06 08:10:26 +0200 | [diff] [blame] | 952 | { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, |
| 953 | { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) }, |
| 954 | { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) }, |
| 955 | { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) }, |
| 956 | { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) }, |
Gernot Hoyler | f2df1ae | 2010-09-02 17:27:20 +0200 | [diff] [blame] | 957 | { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) }, |
| 958 | { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 959 | |
| 960 | /* SST -- large erase sizes are "overlays", "sectors" are 4K */ |
Krzysztof Mazur | e534ee4 | 2013-02-22 15:51:05 +0100 | [diff] [blame] | 961 | { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) }, |
| 962 | { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) }, |
| 963 | { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) }, |
| 964 | { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) }, |
Krzysztof Mazur | 8913405 | 2013-02-22 15:51:06 +0100 | [diff] [blame] | 965 | { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) }, |
Krzysztof Mazur | e534ee4 | 2013-02-22 15:51:05 +0100 | [diff] [blame] | 966 | { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) }, |
| 967 | { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) }, |
| 968 | { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) }, |
| 969 | { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 970 | |
| 971 | /* ST Microelectronics -- newer production may have feature updates */ |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 972 | { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) }, |
| 973 | { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) }, |
| 974 | { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) }, |
| 975 | { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) }, |
| 976 | { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) }, |
| 977 | { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) }, |
| 978 | { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) }, |
| 979 | { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) }, |
| 980 | { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) }, |
Knut Wohlrab | 4800399 | 2012-07-17 15:45:53 +0200 | [diff] [blame] | 981 | { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 982 | |
Anton Vorontsov | f7b0009 | 2010-06-22 20:57:34 +0400 | [diff] [blame] | 983 | { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) }, |
| 984 | { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) }, |
| 985 | { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) }, |
| 986 | { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) }, |
| 987 | { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) }, |
| 988 | { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) }, |
| 989 | { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) }, |
| 990 | { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) }, |
| 991 | { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) }, |
| 992 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 993 | { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) }, |
| 994 | { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) }, |
| 995 | { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 996 | |
Alexandre Pereira da Silva | 943b35a | 2012-06-12 16:42:40 -0300 | [diff] [blame] | 997 | { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) }, |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 998 | { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, |
| 999 | { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1000 | |
Igor Grinberg | 574926c | 2013-11-11 22:55:29 +0200 | [diff] [blame] | 1001 | { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) }, |
Kevin Cernekee | 16004f3 | 2011-05-08 10:47:59 -0700 | [diff] [blame] | 1002 | { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) }, |
| 1003 | { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) }, |
| 1004 | { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) }, |
| 1005 | { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) }, |
Yoshihiro Shimoda | d8f90b2 | 2011-02-09 17:00:33 +0900 | [diff] [blame] | 1006 | |
David Woodhouse | 02d087d | 2007-06-28 22:38:38 +0100 | [diff] [blame] | 1007 | /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 1008 | { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, |
| 1009 | { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, |
| 1010 | { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) }, |
| 1011 | { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, |
| 1012 | { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, |
| 1013 | { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, |
Gabor Juhos | 0af18d2 | 2010-08-04 21:14:27 +0200 | [diff] [blame] | 1014 | { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, |
ing. Federico Fuga | 9d6367f | 2012-06-05 17:37:01 +0200 | [diff] [blame] | 1015 | { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) }, |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 1016 | { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, |
Thierry Reding | d2ac467 | 2010-08-30 13:00:48 +0200 | [diff] [blame] | 1017 | { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, |
Girish K S | 4b6ff7a | 2013-04-16 14:01:14 +0530 | [diff] [blame] | 1018 | { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, |
Thomas Abraham | 4fba37a | 2012-05-09 04:04:54 +0530 | [diff] [blame] | 1019 | { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, |
Stephen Warren | 9b7ef60 | 2012-11-12 12:58:28 -0700 | [diff] [blame] | 1020 | { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, |
Rafał Miłecki | 001c33a | 2013-02-24 13:57:26 +0100 | [diff] [blame] | 1021 | { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, |
Matthieu CASTET | 0aa87b7 | 2012-09-25 11:05:27 +0200 | [diff] [blame] | 1022 | { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) }, |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1023 | |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 1024 | /* Catalyst / On Semiconductor -- non-JEDEC */ |
Sascha Hauer | 5814699 | 2013-08-20 09:54:40 +0200 | [diff] [blame] | 1025 | { "cat25c11", CAT25_INFO( 16, 8, 16, 1, M25P_NO_ERASE | M25P_NO_FR) }, |
| 1026 | { "cat25c03", CAT25_INFO( 32, 8, 16, 2, M25P_NO_ERASE | M25P_NO_FR) }, |
| 1027 | { "cat25c09", CAT25_INFO( 128, 8, 32, 2, M25P_NO_ERASE | M25P_NO_FR) }, |
| 1028 | { "cat25c17", CAT25_INFO( 256, 8, 32, 2, M25P_NO_ERASE | M25P_NO_FR) }, |
| 1029 | { "cat25128", CAT25_INFO(2048, 8, 64, 2, M25P_NO_ERASE | M25P_NO_FR) }, |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 1030 | { }, |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1031 | }; |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 1032 | MODULE_DEVICE_TABLE(spi, m25p_ids); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1033 | |
Bill Pemberton | 06f2551 | 2012-11-19 13:23:07 -0500 | [diff] [blame] | 1034 | static const struct spi_device_id *jedec_probe(struct spi_device *spi) |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1035 | { |
| 1036 | int tmp; |
| 1037 | u8 code = OPCODE_RDID; |
Chen Gong | daa8473 | 2008-09-16 14:14:12 +0800 | [diff] [blame] | 1038 | u8 id[5]; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1039 | u32 jedec; |
Chen Gong | d0e8c47 | 2008-08-11 16:59:15 +0800 | [diff] [blame] | 1040 | u16 ext_jedec; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1041 | struct flash_info *info; |
| 1042 | |
| 1043 | /* JEDEC also defines an optional "extended device information" |
| 1044 | * string for after vendor-specific data, after the three bytes |
| 1045 | * we use here. Supporting some chips might require using it. |
| 1046 | */ |
Chen Gong | daa8473 | 2008-09-16 14:14:12 +0800 | [diff] [blame] | 1047 | tmp = spi_write_then_read(spi, &code, 1, id, 5); |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1048 | if (tmp < 0) { |
Brian Norris | 289c052 | 2011-07-19 10:06:09 -0700 | [diff] [blame] | 1049 | pr_debug("%s: error %d reading JEDEC ID\n", |
Brian Norris | 0a32a10 | 2011-07-19 10:06:10 -0700 | [diff] [blame] | 1050 | dev_name(&spi->dev), tmp); |
Anton Vorontsov | 9d2c4f3 | 2010-06-22 20:57:42 +0400 | [diff] [blame] | 1051 | return ERR_PTR(tmp); |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1052 | } |
| 1053 | jedec = id[0]; |
| 1054 | jedec = jedec << 8; |
| 1055 | jedec |= id[1]; |
| 1056 | jedec = jedec << 8; |
| 1057 | jedec |= id[2]; |
| 1058 | |
Chen Gong | d0e8c47 | 2008-08-11 16:59:15 +0800 | [diff] [blame] | 1059 | ext_jedec = id[3] << 8 | id[4]; |
| 1060 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 1061 | for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) { |
| 1062 | info = (void *)m25p_ids[tmp].driver_data; |
Mike Frysinger | a3d3f73 | 2008-11-26 10:23:25 +0000 | [diff] [blame] | 1063 | if (info->jedec_id == jedec) { |
Mike Frysinger | 9168ab8 | 2008-11-26 10:23:35 +0000 | [diff] [blame] | 1064 | if (info->ext_id != 0 && info->ext_id != ext_jedec) |
Chen Gong | d0e8c47 | 2008-08-11 16:59:15 +0800 | [diff] [blame] | 1065 | continue; |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 1066 | return &m25p_ids[tmp]; |
Mike Frysinger | a3d3f73 | 2008-11-26 10:23:25 +0000 | [diff] [blame] | 1067 | } |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1068 | } |
Kevin Cernekee | f0dff9b | 2010-10-30 21:11:02 -0700 | [diff] [blame] | 1069 | dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec); |
Anton Vorontsov | 9d2c4f3 | 2010-06-22 20:57:42 +0400 | [diff] [blame] | 1070 | return ERR_PTR(-ENODEV); |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1071 | } |
| 1072 | |
| 1073 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1074 | /* |
| 1075 | * board specific setup should have ensured the SPI clock used here |
| 1076 | * matches what the READ command supports, at least until this driver |
| 1077 | * understands FAST_READ (for clocks over 25 MHz). |
| 1078 | */ |
Bill Pemberton | 06f2551 | 2012-11-19 13:23:07 -0500 | [diff] [blame] | 1079 | static int m25p_probe(struct spi_device *spi) |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1080 | { |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 1081 | const struct spi_device_id *id = spi_get_device_id(spi); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1082 | struct flash_platform_data *data; |
| 1083 | struct m25p *flash; |
| 1084 | struct flash_info *info; |
| 1085 | unsigned i; |
Dmitry Eremin-Solenikov | ea6a472 | 2011-05-30 01:02:20 +0400 | [diff] [blame] | 1086 | struct mtd_part_parser_data ppdata; |
Brian Norris | dc525ff | 2013-10-23 19:34:46 -0700 | [diff] [blame] | 1087 | struct device_node *np = spi->dev.of_node; |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 1088 | int ret; |
Shaohui Xie | 5f94913 | 2011-10-14 15:49:00 +0800 | [diff] [blame] | 1089 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1090 | /* Platform data helps sort out which chip type we have, as |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1091 | * well as how this board partitions it. If we don't have |
| 1092 | * a chip ID, try the JEDEC id commands; they'll work for most |
| 1093 | * newer chips, even if we don't recognize the particular chip. |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1094 | */ |
Jingoo Han | 0278fd3 | 2013-07-30 17:17:44 +0900 | [diff] [blame] | 1095 | data = dev_get_platdata(&spi->dev); |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1096 | if (data && data->type) { |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 1097 | const struct spi_device_id *plat_id; |
| 1098 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 1099 | for (i = 0; i < ARRAY_SIZE(m25p_ids) - 1; i++) { |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 1100 | plat_id = &m25p_ids[i]; |
| 1101 | if (strcmp(data->type, plat_id->name)) |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 1102 | continue; |
| 1103 | break; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1104 | } |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1105 | |
Dan Carpenter | f78ec6b | 2010-08-12 09:58:27 +0200 | [diff] [blame] | 1106 | if (i < ARRAY_SIZE(m25p_ids) - 1) |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 1107 | id = plat_id; |
| 1108 | else |
| 1109 | dev_warn(&spi->dev, "unrecognized id %s\n", data->type); |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 1110 | } |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1111 | |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 1112 | info = (void *)id->driver_data; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1113 | |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 1114 | if (info->jedec_id) { |
| 1115 | const struct spi_device_id *jid; |
| 1116 | |
| 1117 | jid = jedec_probe(spi); |
Anton Vorontsov | 9d2c4f3 | 2010-06-22 20:57:42 +0400 | [diff] [blame] | 1118 | if (IS_ERR(jid)) { |
| 1119 | return PTR_ERR(jid); |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 1120 | } else if (jid != id) { |
| 1121 | /* |
| 1122 | * JEDEC knows better, so overwrite platform ID. We |
| 1123 | * can't trust partitions any longer, but we'll let |
| 1124 | * mtd apply them anyway, since some partitions may be |
| 1125 | * marked read-only, and we don't want to lose that |
| 1126 | * information, even if it's not 100% accurate. |
| 1127 | */ |
| 1128 | dev_warn(&spi->dev, "found %s, expected %s\n", |
| 1129 | jid->name, id->name); |
| 1130 | id = jid; |
| 1131 | info = (void *)jid->driver_data; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1132 | } |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 1133 | } |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1134 | |
Brian Norris | 778d226 | 2013-07-24 18:32:07 -0700 | [diff] [blame] | 1135 | flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1136 | if (!flash) |
| 1137 | return -ENOMEM; |
Brian Norris | 778d226 | 2013-07-24 18:32:07 -0700 | [diff] [blame] | 1138 | |
| 1139 | flash->command = devm_kzalloc(&spi->dev, MAX_CMD_SIZE, GFP_KERNEL); |
| 1140 | if (!flash->command) |
Johannes Stezenbach | 61c3506 | 2009-10-28 14:21:37 +0100 | [diff] [blame] | 1141 | return -ENOMEM; |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1142 | |
| 1143 | flash->spi = spi; |
David Brownell | 7d5230e | 2007-06-24 15:09:13 -0700 | [diff] [blame] | 1144 | mutex_init(&flash->lock); |
Jingoo Han | 975aefc | 2013-04-06 15:41:32 +0900 | [diff] [blame] | 1145 | spi_set_drvdata(spi, flash); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1146 | |
Michael Hennerich | 7228982 | 2008-07-03 23:54:42 -0700 | [diff] [blame] | 1147 | /* |
Gabor Juhos | f80e521 | 2010-08-05 16:58:36 +0200 | [diff] [blame] | 1148 | * Atmel, SST and Intel/Numonyx serial flash tend to power |
Graf Yang | ea60658a | 2009-09-24 15:46:22 -0400 | [diff] [blame] | 1149 | * up with the software protection bits set |
Michael Hennerich | 7228982 | 2008-07-03 23:54:42 -0700 | [diff] [blame] | 1150 | */ |
| 1151 | |
Kevin Cernekee | aa08465 | 2011-05-08 10:48:00 -0700 | [diff] [blame] | 1152 | if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL || |
| 1153 | JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL || |
| 1154 | JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) { |
Michael Hennerich | 7228982 | 2008-07-03 23:54:42 -0700 | [diff] [blame] | 1155 | write_enable(flash); |
| 1156 | write_sr(flash, 0); |
| 1157 | } |
| 1158 | |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1159 | if (data && data->name) |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1160 | flash->mtd.name = data->name; |
| 1161 | else |
Kay Sievers | 160bbab | 2008-12-23 10:00:14 +0000 | [diff] [blame] | 1162 | flash->mtd.name = dev_name(&spi->dev); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1163 | |
| 1164 | flash->mtd.type = MTD_NORFLASH; |
Artem B. Bityutskiy | 783ed81 | 2006-06-14 19:53:44 +0400 | [diff] [blame] | 1165 | flash->mtd.writesize = 1; |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1166 | flash->mtd.flags = MTD_CAP_NORFLASH; |
| 1167 | flash->mtd.size = info->sector_size * info->n_sectors; |
Artem Bityutskiy | 3c3c10b | 2012-01-30 14:58:32 +0200 | [diff] [blame] | 1168 | flash->mtd._erase = m25p80_erase; |
| 1169 | flash->mtd._read = m25p80_read; |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 1170 | |
Austin Boyle | 972e1b7 | 2013-01-04 13:02:28 +1300 | [diff] [blame] | 1171 | /* flash protection support for STmicro chips */ |
| 1172 | if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) { |
| 1173 | flash->mtd._lock = m25p80_lock; |
| 1174 | flash->mtd._unlock = m25p80_unlock; |
| 1175 | } |
| 1176 | |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 1177 | /* sst flash chips use AAI word program */ |
Krzysztof Mazur | e534ee4 | 2013-02-22 15:51:05 +0100 | [diff] [blame] | 1178 | if (info->flags & SST_WRITE) |
Artem Bityutskiy | 3c3c10b | 2012-01-30 14:58:32 +0200 | [diff] [blame] | 1179 | flash->mtd._write = sst_write; |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 1180 | else |
Artem Bityutskiy | 3c3c10b | 2012-01-30 14:58:32 +0200 | [diff] [blame] | 1181 | flash->mtd._write = m25p80_write; |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1182 | |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1183 | /* prefer "small sector" erase if possible */ |
| 1184 | if (info->flags & SECT_4K) { |
| 1185 | flash->erase_opcode = OPCODE_BE_4K; |
| 1186 | flash->mtd.erasesize = 4096; |
Michel Stempin | 6c3b889 | 2013-07-15 12:13:56 +0200 | [diff] [blame] | 1187 | } else if (info->flags & SECT_4K_PMC) { |
| 1188 | flash->erase_opcode = OPCODE_BE_4K_PMC; |
| 1189 | flash->mtd.erasesize = 4096; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1190 | } else { |
| 1191 | flash->erase_opcode = OPCODE_SE; |
| 1192 | flash->mtd.erasesize = info->sector_size; |
| 1193 | } |
| 1194 | |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 1195 | if (info->flags & M25P_NO_ERASE) |
| 1196 | flash->mtd.flags |= MTD_NO_ERASE; |
David Brownell | 87f39f0 | 2009-03-26 00:42:50 -0700 | [diff] [blame] | 1197 | |
Dmitry Eremin-Solenikov | ea6a472 | 2011-05-30 01:02:20 +0400 | [diff] [blame] | 1198 | ppdata.of_node = spi->dev.of_node; |
Artem Bityutskiy | d85316a | 2008-12-18 14:10:05 +0200 | [diff] [blame] | 1199 | flash->mtd.dev.parent = &spi->dev; |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 1200 | flash->page_size = info->page_size; |
Brian Norris | b54f47c | 2012-01-31 00:06:03 -0800 | [diff] [blame] | 1201 | flash->mtd.writebufsize = flash->page_size; |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 1202 | |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 1203 | if (np) { |
Brian Norris | ddba7c5 | 2013-08-19 21:30:22 -0700 | [diff] [blame] | 1204 | /* If we were instantiated by DT, use it */ |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 1205 | if (of_property_read_bool(np, "m25p,fast-read")) |
| 1206 | flash->flash_read = M25P80_FAST; |
Brian Norris | 99ed1a1 | 2013-12-04 22:59:40 -0800 | [diff] [blame] | 1207 | else |
| 1208 | flash->flash_read = M25P80_NORMAL; |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 1209 | } else { |
Brian Norris | ddba7c5 | 2013-08-19 21:30:22 -0700 | [diff] [blame] | 1210 | /* If we weren't instantiated by DT, default to fast-read */ |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 1211 | flash->flash_read = M25P80_FAST; |
| 1212 | } |
Marek Vasut | 12ad2be | 2012-09-24 03:39:39 +0200 | [diff] [blame] | 1213 | |
Brian Norris | ddba7c5 | 2013-08-19 21:30:22 -0700 | [diff] [blame] | 1214 | /* Some devices cannot do fast-read, no matter what DT tells us */ |
Sascha Hauer | 5814699 | 2013-08-20 09:54:40 +0200 | [diff] [blame] | 1215 | if (info->flags & M25P_NO_FR) |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 1216 | flash->flash_read = M25P80_NORMAL; |
Marek Vasut | 12ad2be | 2012-09-24 03:39:39 +0200 | [diff] [blame] | 1217 | |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 1218 | /* Quad-read mode takes precedence over fast/normal */ |
| 1219 | if (spi->mode & SPI_RX_QUAD && info->flags & M25P80_QUAD_READ) { |
| 1220 | ret = set_quad_mode(flash, info->jedec_id); |
| 1221 | if (ret) { |
| 1222 | dev_err(&flash->spi->dev, "quad mode not supported\n"); |
| 1223 | return ret; |
| 1224 | } |
| 1225 | flash->flash_read = M25P80_QUAD; |
| 1226 | } |
| 1227 | |
Brian Norris | 87c9511 | 2013-04-11 01:34:57 -0700 | [diff] [blame] | 1228 | /* Default commands */ |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 1229 | switch (flash->flash_read) { |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 1230 | case M25P80_QUAD: |
| 1231 | flash->read_opcode = OPCODE_QUAD_READ; |
| 1232 | break; |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 1233 | case M25P80_FAST: |
Brian Norris | 87c9511 | 2013-04-11 01:34:57 -0700 | [diff] [blame] | 1234 | flash->read_opcode = OPCODE_FAST_READ; |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 1235 | break; |
| 1236 | case M25P80_NORMAL: |
Brian Norris | 87c9511 | 2013-04-11 01:34:57 -0700 | [diff] [blame] | 1237 | flash->read_opcode = OPCODE_NORM_READ; |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 1238 | break; |
| 1239 | default: |
| 1240 | dev_err(&flash->spi->dev, "No Read opcode defined\n"); |
| 1241 | return -EINVAL; |
| 1242 | } |
Brian Norris | 87c9511 | 2013-04-11 01:34:57 -0700 | [diff] [blame] | 1243 | |
| 1244 | flash->program_opcode = OPCODE_PP; |
| 1245 | |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 1246 | if (info->addr_width) |
| 1247 | flash->addr_width = info->addr_width; |
Brian Norris | 87c9511 | 2013-04-11 01:34:57 -0700 | [diff] [blame] | 1248 | else if (flash->mtd.size > 0x1000000) { |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 1249 | /* enable 4-byte addressing if the device exceeds 16MiB */ |
Brian Norris | 87c9511 | 2013-04-11 01:34:57 -0700 | [diff] [blame] | 1250 | flash->addr_width = 4; |
| 1251 | if (JEDEC_MFR(info->jedec_id) == CFI_MFR_AMD) { |
| 1252 | /* Dedicated 4-byte command set */ |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 1253 | switch (flash->flash_read) { |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 1254 | case M25P80_QUAD: |
Geert Uytterhoeven | 7587f64 | 2014-01-15 16:48:55 +0100 | [diff] [blame] | 1255 | flash->read_opcode = OPCODE_QUAD_READ_4B; |
Sourav Poddar | 3487a639 | 2013-11-06 20:05:35 +0530 | [diff] [blame] | 1256 | break; |
Sourav Poddar | 8552b43 | 2013-11-06 20:05:34 +0530 | [diff] [blame] | 1257 | case M25P80_FAST: |
| 1258 | flash->read_opcode = OPCODE_FAST_READ_4B; |
| 1259 | break; |
| 1260 | case M25P80_NORMAL: |
| 1261 | flash->read_opcode = OPCODE_NORM_READ_4B; |
| 1262 | break; |
| 1263 | } |
Brian Norris | 87c9511 | 2013-04-11 01:34:57 -0700 | [diff] [blame] | 1264 | flash->program_opcode = OPCODE_PP_4B; |
| 1265 | /* No small sector erase for 4-byte command set */ |
| 1266 | flash->erase_opcode = OPCODE_SE_4B; |
| 1267 | flash->mtd.erasesize = info->sector_size; |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 1268 | } else |
Brian Norris | 87c9511 | 2013-04-11 01:34:57 -0700 | [diff] [blame] | 1269 | set_4byte(flash, info->jedec_id, 1); |
| 1270 | } else { |
| 1271 | flash->addr_width = 3; |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 1272 | } |
Artem Bityutskiy | d85316a | 2008-12-18 14:10:05 +0200 | [diff] [blame] | 1273 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 1274 | dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name, |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1275 | (long long)flash->mtd.size >> 10); |
| 1276 | |
Brian Norris | 289c052 | 2011-07-19 10:06:09 -0700 | [diff] [blame] | 1277 | pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) " |
David Woodhouse | 02d087d | 2007-06-28 22:38:38 +0100 | [diff] [blame] | 1278 | ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1279 | flash->mtd.name, |
Artem Bityutskiy | d85316a | 2008-12-18 14:10:05 +0200 | [diff] [blame] | 1280 | (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1281 | flash->mtd.erasesize, flash->mtd.erasesize / 1024, |
| 1282 | flash->mtd.numeraseregions); |
| 1283 | |
| 1284 | if (flash->mtd.numeraseregions) |
| 1285 | for (i = 0; i < flash->mtd.numeraseregions; i++) |
Brian Norris | 289c052 | 2011-07-19 10:06:09 -0700 | [diff] [blame] | 1286 | pr_debug("mtd.eraseregions[%d] = { .offset = 0x%llx, " |
David Woodhouse | 02d087d | 2007-06-28 22:38:38 +0100 | [diff] [blame] | 1287 | ".erasesize = 0x%.8x (%uKiB), " |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1288 | ".numblocks = %d }\n", |
Artem Bityutskiy | d85316a | 2008-12-18 14:10:05 +0200 | [diff] [blame] | 1289 | i, (long long)flash->mtd.eraseregions[i].offset, |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1290 | flash->mtd.eraseregions[i].erasesize, |
| 1291 | flash->mtd.eraseregions[i].erasesize / 1024, |
| 1292 | flash->mtd.eraseregions[i].numblocks); |
| 1293 | |
| 1294 | |
| 1295 | /* partitions should match sector boundaries; and it may be good to |
| 1296 | * use readonly partitions for writeprotected sectors (BP2..BP0). |
| 1297 | */ |
Dmitry Eremin-Solenikov | 871770b | 2011-06-02 17:59:16 +0400 | [diff] [blame] | 1298 | return mtd_device_parse_register(&flash->mtd, NULL, &ppdata, |
| 1299 | data ? data->parts : NULL, |
| 1300 | data ? data->nr_parts : 0); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1301 | } |
| 1302 | |
| 1303 | |
Bill Pemberton | 810b7e0 | 2012-11-19 13:26:04 -0500 | [diff] [blame] | 1304 | static int m25p_remove(struct spi_device *spi) |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1305 | { |
Jingoo Han | 975aefc | 2013-04-06 15:41:32 +0900 | [diff] [blame] | 1306 | struct m25p *flash = spi_get_drvdata(spi); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1307 | |
| 1308 | /* Clean up MTD stuff. */ |
Brian Norris | 9650b9b | 2013-10-27 15:42:12 -0700 | [diff] [blame] | 1309 | return mtd_device_unregister(&flash->mtd); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1310 | } |
| 1311 | |
| 1312 | |
/*
 * SPI driver glue: binds by the "m25p80" driver name and by every chip
 * name listed in m25p_ids[].
 */
static struct spi_driver m25p80_driver = {
	.driver = {
		.name	= "m25p80",
		.owner	= THIS_MODULE,
	},
	.id_table	= m25p_ids,
	.probe	= m25p_probe,
	.remove	= m25p_remove,

	/* REVISIT: many of these chips have deep power-down modes, which
	 * should clearly be entered on suspend() to minimize power use.
	 * And also when they're otherwise idle...
	 */
};
| 1327 | |
Axel Lin | c9d1b75 | 2012-01-27 15:45:20 +0800 | [diff] [blame] | 1328 | module_spi_driver(m25p80_driver); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1329 | |
| 1330 | MODULE_LICENSE("GPL"); |
| 1331 | MODULE_AUTHOR("Mike Lavender"); |
| 1332 | MODULE_DESCRIPTION("MTD SPI driver for ST M25Pxx flash chips"); |