/*
 * MTD SPI driver for ST M25Pxx (and similar) serial flash chips
 *
 * Author: Mike Lavender, mike@steroidmicros.com
 *
 * Copyright (c) 2005, Intec Automation Inc.
 *
 * Some parts are based on lart.c by Abraham Van Der Merwe
 *
 * Cleaned up and generalized based on mtd_dataflash.c
 *
 * This code is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/init.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mod_devicetable.h>

#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of_platform.h>

#include <linux/spi/spi.h>
#include <linux/spi/flash.h>

/* Flash opcodes. */
#define	OPCODE_WREN		0x06	/* Write enable */
#define	OPCODE_RDSR		0x05	/* Read status register */
#define	OPCODE_WRSR		0x01	/* Write status register 1 byte */
#define	OPCODE_NORM_READ	0x03	/* Read data bytes (low frequency) */
#define	OPCODE_FAST_READ	0x0b	/* Read data bytes (high frequency) */
#define	OPCODE_PP		0x02	/* Page program (up to 256 bytes) */
#define	OPCODE_BE_4K		0x20	/* Erase 4KiB block */
#define	OPCODE_BE_32K		0x52	/* Erase 32KiB block */
#define	OPCODE_CHIP_ERASE	0xc7	/* Erase whole flash chip */
#define	OPCODE_SE		0xd8	/* Sector erase (usually 64KiB) */
#define	OPCODE_RDID		0x9f	/* Read JEDEC ID */

/* Used for SST flashes only. */
#define	OPCODE_BP		0x02	/* Byte program */
#define	OPCODE_WRDI		0x04	/* Write disable */
#define	OPCODE_AAI_WP		0xad	/* Auto address increment word program */

/* Used for Macronix flashes only. */
#define	OPCODE_EN4B		0xb7	/* Enter 4-byte mode */
#define	OPCODE_EX4B		0xe9	/* Exit 4-byte mode */

/* Used for Spansion flashes only. */
#define	OPCODE_BRWR		0x17	/* Bank register write */

/* Status Register bits. */
#define	SR_WIP			1	/* Write in progress */
#define	SR_WEL			2	/* Write enable latch */
/* meaning of other SR_* bits may differ between vendors */
#define	SR_BP0			4	/* Block protect 0 */
#define	SR_BP1			8	/* Block protect 1 */
#define	SR_BP2			0x10	/* Block protect 2 */
#define	SR_SRWD			0x80	/* SR write protect */

/* Define max times to check status register before we give up. */
#define	MAX_READY_WAIT_JIFFIES	(40 * HZ)	/* M25P16 specs 40s max chip erase */
#define	MAX_CMD_SIZE		5

#define JEDEC_MFR(_jedec_id)	((_jedec_id) >> 16)

/****************************************************************************/

struct m25p {
	struct spi_device	*spi;
	struct mutex		lock;
	struct mtd_info		mtd;
	u16			page_size;
	u16			addr_width;
	u8			erase_opcode;
	u8			*command;
	bool			fast_read;
};

static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
{
	return container_of(mtd, struct m25p, mtd);
}

/****************************************************************************/

/*
 * Internal helper functions
 */

/*
 * Read the status register.
 * Returns the status register value, or a negative error code.
 */
static int read_sr(struct m25p *flash)
{
	ssize_t retval;
	u8 code = OPCODE_RDSR;
	u8 val;

	retval = spi_write_then_read(flash->spi, &code, 1, &val, 1);

	if (retval < 0) {
		dev_err(&flash->spi->dev, "error %d reading SR\n",
				(int) retval);
		return retval;
	}

	return val;
}

/*
 * Write status register 1 byte
 * Returns negative if error occurred.
 */
static int write_sr(struct m25p *flash, u8 val)
{
	flash->command[0] = OPCODE_WRSR;
	flash->command[1] = val;

	return spi_write(flash->spi, flash->command, 2);
}

/*
 * Set write enable latch with Write Enable command.
 * Returns negative if error occurred.
 */
static inline int write_enable(struct m25p *flash)
{
	u8	code = OPCODE_WREN;

	return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
}

/*
 * Send write disable instruction to the chip.
 */
static inline int write_disable(struct m25p *flash)
{
	u8	code = OPCODE_WRDI;

	return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
}

/*
 * Enable/disable 4-byte addressing mode.
 */
static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
{
	switch (JEDEC_MFR(jedec_id)) {
	case CFI_MFR_MACRONIX:
	case 0xEF /* winbond */:
		flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
		return spi_write(flash->spi, flash->command, 1);
	default:
		/* Spansion style */
		flash->command[0] = OPCODE_BRWR;
		flash->command[1] = enable << 7;
		return spi_write(flash->spi, flash->command, 2);
	}
}

/*
 * Service routine to read status register until ready, or timeout occurs.
 * Returns non-zero if error.
 */
static int wait_till_ready(struct m25p *flash)
{
	unsigned long deadline;
	int sr;

	deadline = jiffies + MAX_READY_WAIT_JIFFIES;

	do {
		if ((sr = read_sr(flash)) < 0)
			break;
		else if (!(sr & SR_WIP))
			return 0;

		cond_resched();

	} while (!time_after_eq(jiffies, deadline));

	return 1;
}

/*
 * Erase the whole flash memory
 *
 * Returns 0 if successful, non-zero otherwise.
 */
static int erase_chip(struct m25p *flash)
{
	pr_debug("%s: %s %lldKiB\n", dev_name(&flash->spi->dev), __func__,
			(long long)(flash->mtd.size >> 10));

	/* Wait until finished previous write command. */
	if (wait_till_ready(flash))
		return 1;

	/* Send write enable, then erase commands. */
	write_enable(flash);

	/* Set up command buffer. */
	flash->command[0] = OPCODE_CHIP_ERASE;

	spi_write(flash->spi, flash->command, 1);

	return 0;
}

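/*
 * Pack the flash address into cmd[1..], most significant byte first.
 * The opcode is expected to already be in cmd[0]; callers transmit
 * m25p_cmdsz() bytes, so 3-byte-address parts simply ignore cmd[4].
 */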
static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd)
{
	/* opcode is in cmd[0] */
	cmd[1] = addr >> (flash->addr_width * 8 - 8);
	cmd[2] = addr >> (flash->addr_width * 8 - 16);
	cmd[3] = addr >> (flash->addr_width * 8 - 24);
	cmd[4] = addr >> (flash->addr_width * 8 - 32);
}

static int m25p_cmdsz(struct m25p *flash)
{
	return 1 + flash->addr_width;
}

/*
 * Erase one sector of flash memory at offset ``offset'' which is any
 * address within the sector which should be erased.
 *
 * Returns 0 if successful, non-zero otherwise.
 */
static int erase_sector(struct m25p *flash, u32 offset)
{
	pr_debug("%s: %s %dKiB at 0x%08x\n", dev_name(&flash->spi->dev),
			__func__, flash->mtd.erasesize / 1024, offset);

	/* Wait until finished previous write command. */
	if (wait_till_ready(flash))
		return 1;

	/* Send write enable, then erase commands. */
	write_enable(flash);

	/* Set up command buffer. */
	flash->command[0] = flash->erase_opcode;
	m25p_addr2cmd(flash, offset, flash->command);

	spi_write(flash->spi, flash->command, m25p_cmdsz(flash));

	return 0;
}

/****************************************************************************/

/*
 * MTD implementation
 */

/*
 * Erase an address range on the flash chip.  The address range may span
 * one or more erase sectors.  Return an error if there is a problem erasing.
 */
static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	u32 addr, len;
	uint32_t rem;

	pr_debug("%s: %s at 0x%llx, len %lld\n", dev_name(&flash->spi->dev),
			__func__, (long long)instr->addr,
			(long long)instr->len);

	div_u64_rem(instr->len, mtd->erasesize, &rem);
	if (rem)
		return -EINVAL;

	addr = instr->addr;
	len = instr->len;

	mutex_lock(&flash->lock);

	/* whole-chip erase? */
	if (len == flash->mtd.size) {
		if (erase_chip(flash)) {
			instr->state = MTD_ERASE_FAILED;
			mutex_unlock(&flash->lock);
			return -EIO;
		}

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using OPCODE_SE instead of OPCODE_BE_4K.  We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else {
		while (len) {
			if (erase_sector(flash, addr)) {
				instr->state = MTD_ERASE_FAILED;
				mutex_unlock(&flash->lock);
				return -EIO;
			}

			addr += mtd->erasesize;
			len -= mtd->erasesize;
		}
	}

	mutex_unlock(&flash->lock);

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

/*
 * Read an address range from the flash chip.  The address range
 * may be any size provided it is within the physical boundaries.
 */
static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
	size_t *retlen, u_char *buf)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	struct spi_transfer t[2];
	struct spi_message m;
	uint8_t opcode;

	pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
			__func__, (u32)from, len);

	spi_message_init(&m);
	memset(t, 0, (sizeof t));

	/* NOTE:
	 * OPCODE_FAST_READ (if available) is faster.
	 * Should add 1 byte DUMMY_BYTE.
	 */
	t[0].tx_buf = flash->command;
	t[0].len = m25p_cmdsz(flash) + (flash->fast_read ? 1 : 0);
	spi_message_add_tail(&t[0], &m);

	t[1].rx_buf = buf;
	t[1].len = len;
	spi_message_add_tail(&t[1], &m);

	mutex_lock(&flash->lock);

	/* Wait till previous write/erase is done. */
	if (wait_till_ready(flash)) {
		/* REVISIT status return?? */
		mutex_unlock(&flash->lock);
		return 1;
	}

	/* FIXME switch to OPCODE_FAST_READ.  It's required for higher
	 * clocks; and at this writing, every chip this driver handles
	 * supports that opcode.
	 */

	/* Set up the write data buffer. */
	opcode = flash->fast_read ? OPCODE_FAST_READ : OPCODE_NORM_READ;
	flash->command[0] = opcode;
	m25p_addr2cmd(flash, from, flash->command);

	spi_sync(flash->spi, &m);

	*retlen = m.actual_length - m25p_cmdsz(flash) -
			(flash->fast_read ? 1 : 0);

	mutex_unlock(&flash->lock);

	return 0;
}

/*
 * Write an address range to the flash chip.  Data must be written in
 * FLASH_PAGESIZE chunks.  The address range may be any size provided
 * it is within the physical boundaries.
 */
static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	u32 page_offset, page_size;
	struct spi_transfer t[2];
	struct spi_message m;

	pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
			__func__, (u32)to, len);

	spi_message_init(&m);
	memset(t, 0, (sizeof t));

	t[0].tx_buf = flash->command;
	t[0].len = m25p_cmdsz(flash);
	spi_message_add_tail(&t[0], &m);

	t[1].tx_buf = buf;
	spi_message_add_tail(&t[1], &m);

	mutex_lock(&flash->lock);

	/* Wait until finished previous write command. */
	if (wait_till_ready(flash)) {
		mutex_unlock(&flash->lock);
		return 1;
	}

	write_enable(flash);

	/* Set up the opcode in the write buffer. */
	flash->command[0] = OPCODE_PP;
	m25p_addr2cmd(flash, to, flash->command);

	page_offset = to & (flash->page_size - 1);

	/* do all the bytes fit onto one page? */
	if (page_offset + len <= flash->page_size) {
		t[1].len = len;

		spi_sync(flash->spi, &m);

		*retlen = m.actual_length - m25p_cmdsz(flash);
	} else {
		u32 i;

		/* the size of data remaining on the first page */
		page_size = flash->page_size - page_offset;

		t[1].len = page_size;
		spi_sync(flash->spi, &m);

		*retlen = m.actual_length - m25p_cmdsz(flash);

		/* write everything in flash->page_size chunks */
		for (i = page_size; i < len; i += page_size) {
			page_size = len - i;
			if (page_size > flash->page_size)
				page_size = flash->page_size;

			/* write the next page to flash */
			m25p_addr2cmd(flash, to + i, flash->command);

			t[1].tx_buf = buf + i;
			t[1].len = page_size;

			wait_till_ready(flash);

			write_enable(flash);

			spi_sync(flash->spi, &m);

			*retlen += m.actual_length - m25p_cmdsz(flash);
		}
	}

	mutex_unlock(&flash->lock);

	return 0;
}

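/*
 * Write path for SST parts, which use byte-program and auto address
 * increment (AAI) word-program commands instead of page program: an
 * optional leading byte aligns the address, the bulk of the data is
 * streamed two bytes at a time with OPCODE_AAI_WP, and any trailing
 * byte is written with another byte program.
 */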
static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	struct spi_transfer t[2];
	struct spi_message m;
	size_t actual;
	int cmd_sz, ret;

	pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
			__func__, (u32)to, len);

	spi_message_init(&m);
	memset(t, 0, (sizeof t));

	t[0].tx_buf = flash->command;
	t[0].len = m25p_cmdsz(flash);
	spi_message_add_tail(&t[0], &m);

	t[1].tx_buf = buf;
	spi_message_add_tail(&t[1], &m);

	mutex_lock(&flash->lock);

	/* Wait until finished previous write command. */
	ret = wait_till_ready(flash);
	if (ret)
		goto time_out;

	write_enable(flash);

	actual = to % 2;
	/* Start write from odd address. */
	if (actual) {
		flash->command[0] = OPCODE_BP;
		m25p_addr2cmd(flash, to, flash->command);

		/* write one byte. */
		t[1].len = 1;
		spi_sync(flash->spi, &m);
		ret = wait_till_ready(flash);
		if (ret)
			goto time_out;
		*retlen += m.actual_length - m25p_cmdsz(flash);
	}
	to += actual;

	flash->command[0] = OPCODE_AAI_WP;
	m25p_addr2cmd(flash, to, flash->command);

	/* Write out most of the data here. */
	cmd_sz = m25p_cmdsz(flash);
	for (; actual < len - 1; actual += 2) {
		t[0].len = cmd_sz;
		/* write two bytes. */
		t[1].len = 2;
		t[1].tx_buf = buf + actual;

		spi_sync(flash->spi, &m);
		ret = wait_till_ready(flash);
		if (ret)
			goto time_out;
		*retlen += m.actual_length - cmd_sz;
		cmd_sz = 1;
		to += 2;
	}
	write_disable(flash);
	ret = wait_till_ready(flash);
	if (ret)
		goto time_out;

	/* Write out trailing byte if it exists. */
	if (actual != len) {
		write_enable(flash);
		flash->command[0] = OPCODE_BP;
		m25p_addr2cmd(flash, to, flash->command);
		t[0].len = m25p_cmdsz(flash);
		t[1].len = 1;
		t[1].tx_buf = buf + actual;

		spi_sync(flash->spi, &m);
		ret = wait_till_ready(flash);
		if (ret)
			goto time_out;
		*retlen += m.actual_length - m25p_cmdsz(flash);
		write_disable(flash);
	}

time_out:
	mutex_unlock(&flash->lock);
	return ret;
}

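/*
 * Protect a region of the flash by raising the status register
 * block-protect bits (BP0-BP2).  Granularity is a power-of-two
 * fraction of the chip, and the new setting is only written when it
 * does not shrink the area that is already protected.
 */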
static int m25p80_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	uint32_t offset = ofs;
	uint8_t status_old, status_new;
	int res = 0;

	mutex_lock(&flash->lock);
	/* Wait until finished previous command */
	if (wait_till_ready(flash)) {
		res = 1;
		goto err;
	}

	status_old = read_sr(flash);

	if (offset < flash->mtd.size-(flash->mtd.size/2))
		status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
	else if (offset < flash->mtd.size-(flash->mtd.size/4))
		status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
	else if (offset < flash->mtd.size-(flash->mtd.size/8))
		status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
	else if (offset < flash->mtd.size-(flash->mtd.size/16))
		status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
	else if (offset < flash->mtd.size-(flash->mtd.size/32))
		status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
	else if (offset < flash->mtd.size-(flash->mtd.size/64))
		status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
	else
		status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;

	/* Only modify protection if it will not unlock other areas */
	if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) >
					(status_old&(SR_BP2|SR_BP1|SR_BP0))) {
		write_enable(flash);
		if (write_sr(flash, status_new) < 0) {
			res = 1;
			goto err;
		}
	}

err:	mutex_unlock(&flash->lock);
	return res;
}

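/*
 * Counterpart of m25p80_lock(): lower the block-protect bits so the
 * requested range becomes writable, but only when the change does not
 * increase the currently protected area.
 */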
static int m25p80_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	uint32_t offset = ofs;
	uint8_t status_old, status_new;
	int res = 0;

	mutex_lock(&flash->lock);
	/* Wait until finished previous command */
	if (wait_till_ready(flash)) {
		res = 1;
		goto err;
	}

	status_old = read_sr(flash);

	if (offset+len > flash->mtd.size-(flash->mtd.size/64))
		status_new = status_old & ~(SR_BP2|SR_BP1|SR_BP0);
	else if (offset+len > flash->mtd.size-(flash->mtd.size/32))
		status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
	else if (offset+len > flash->mtd.size-(flash->mtd.size/16))
		status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
	else if (offset+len > flash->mtd.size-(flash->mtd.size/8))
		status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
	else if (offset+len > flash->mtd.size-(flash->mtd.size/4))
		status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
	else if (offset+len > flash->mtd.size-(flash->mtd.size/2))
		status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
	else
		status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;

	/* Only modify protection if it will not lock other areas */
	if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) <
					(status_old&(SR_BP2|SR_BP1|SR_BP0))) {
		write_enable(flash);
		if (write_sr(flash, status_new) < 0) {
			res = 1;
			goto err;
		}
	}

err:	mutex_unlock(&flash->lock);
	return res;
}

/****************************************************************************/

/*
 * SPI device driver setup and teardown
 */

struct flash_info {
	/* JEDEC id zero means "no ID" (most older chips); otherwise it has
	 * a high byte of zero plus three data bytes: the manufacturer id,
	 * then a two byte device id.
	 */
	u32		jedec_id;
	u16		ext_id;

	/* The size listed here is what works with OPCODE_SE, which isn't
	 * necessarily called a "sector" by the vendor.
	 */
	unsigned	sector_size;
	u16		n_sectors;

	u16		page_size;
	u16		addr_width;

	u16		flags;
#define	SECT_4K		0x01		/* OPCODE_BE_4K works uniformly */
#define	M25P_NO_ERASE	0x02		/* No erase command needed */
};

#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
	((kernel_ulong_t)&(struct flash_info) {				\
		.jedec_id = (_jedec_id),				\
		.ext_id = (_ext_id),					\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = 256,					\
		.flags = (_flags),					\
	})

#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width)	\
	((kernel_ulong_t)&(struct flash_info) {				\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = (_page_size),				\
		.addr_width = (_addr_width),				\
		.flags = M25P_NO_ERASE,					\
	})

| 705 | /* NOTE: double check command sets and memory organization when you add |
| 706 | * more flash chips. This current list focusses on newer chips, which |
| 707 | * have been converging on command sets which including JEDEC ID. |
| 708 | */ |
static const struct spi_device_id m25p_ids[] = {
	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
	{ "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
	{ "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },

	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) },
	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
	{ "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },

	{ "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) },
	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
	{ "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },

	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },

	/* EON -- en25xxx */
	{ "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) },
	{ "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) },
	{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
	{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
	{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },

	/* Everspin */
	{ "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2) },

	/* GigaDevice */
	{ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
	{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },

	/* Intel/Numonyx -- xxxs33b */
	{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
	{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
	{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
	{ "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) },

	/* Macronix */
	{ "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
	{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
	{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
	{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
	{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
	{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },

	/* Micron */
	{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
	{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
	{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },

	/* Spansion -- single (large) sector size only, at least
	 * for the chips listed here (without boot sectors).
	 */
	{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, 0) },
	{ "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, 0) },
	{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
	{ "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, 0) },
	{ "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, 0) },
	{ "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) },
	{ "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) },
	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) },
	{ "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) },
	{ "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) },
	{ "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) },
	{ "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) },
	{ "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) },
	{ "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) },
	{ "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) },
	{ "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },

	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) },
	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) },
	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) },
	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) },
	{ "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) },
	{ "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K) },
	{ "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) },
	{ "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) },

	/* ST Microelectronics -- newer production may have feature updates */
	{ "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) },
	{ "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) },
	{ "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) },
	{ "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) },
	{ "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) },
	{ "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) },
	{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
	{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
	{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
	{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },

	{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
	{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
	{ "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) },
	{ "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) },
	{ "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) },
	{ "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) },
	{ "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) },
	{ "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) },
	{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) },

	{ "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) },
	{ "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) },
	{ "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) },

	{ "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) },
	{ "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) },
	{ "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) },

	{ "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) },
	{ "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) },

	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
	{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
	{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
	{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
	{ "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
	{ "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
	{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
	{ "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
	{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },

	/* Catalyst / On Semiconductor -- non-JEDEC */
	{ "cat25c11", CAT25_INFO(16, 8, 16, 1) },
	{ "cat25c03", CAT25_INFO(32, 8, 16, 2) },
	{ "cat25c09", CAT25_INFO(128, 8, 32, 2) },
	{ "cat25c17", CAT25_INFO(256, 8, 32, 2) },
	{ "cat25128", CAT25_INFO(2048, 8, 64, 2) },
	{ },
};
MODULE_DEVICE_TABLE(spi, m25p_ids);

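/*
 * Read the 3-byte JEDEC ID plus two extension bytes and look the chip
 * up in m25p_ids[]; returns the matching table entry or an ERR_PTR().
 */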
Bill Pemberton | 06f2551 | 2012-11-19 13:23:07 -0500 | [diff] [blame] | 853 | static const struct spi_device_id *jedec_probe(struct spi_device *spi) |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 854 | { |
| 855 | int tmp; |
| 856 | u8 code = OPCODE_RDID; |
Chen Gong | daa8473 | 2008-09-16 14:14:12 +0800 | [diff] [blame] | 857 | u8 id[5]; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 858 | u32 jedec; |
Chen Gong | d0e8c47 | 2008-08-11 16:59:15 +0800 | [diff] [blame] | 859 | u16 ext_jedec; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 860 | struct flash_info *info; |
| 861 | |
| 862 | /* JEDEC also defines an optional "extended device information" |
| 863 | * string for after vendor-specific data, after the three bytes |
| 864 | * we use here. Supporting some chips might require using it. |
| 865 | */ |
Chen Gong | daa8473 | 2008-09-16 14:14:12 +0800 | [diff] [blame] | 866 | tmp = spi_write_then_read(spi, &code, 1, id, 5); |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 867 | if (tmp < 0) { |
Brian Norris | 289c052 | 2011-07-19 10:06:09 -0700 | [diff] [blame] | 868 | pr_debug("%s: error %d reading JEDEC ID\n", |
Brian Norris | 0a32a10 | 2011-07-19 10:06:10 -0700 | [diff] [blame] | 869 | dev_name(&spi->dev), tmp); |
Anton Vorontsov | 9d2c4f3 | 2010-06-22 20:57:42 +0400 | [diff] [blame] | 870 | return ERR_PTR(tmp); |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 871 | } |
| 872 | jedec = id[0]; |
| 873 | jedec = jedec << 8; |
| 874 | jedec |= id[1]; |
| 875 | jedec = jedec << 8; |
| 876 | jedec |= id[2]; |
| 877 | |
Chen Gong | d0e8c47 | 2008-08-11 16:59:15 +0800 | [diff] [blame] | 878 | ext_jedec = id[3] << 8 | id[4]; |
| 879 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 880 | for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) { |
| 881 | info = (void *)m25p_ids[tmp].driver_data; |
Mike Frysinger | a3d3f73 | 2008-11-26 10:23:25 +0000 | [diff] [blame] | 882 | if (info->jedec_id == jedec) { |
Mike Frysinger | 9168ab8 | 2008-11-26 10:23:35 +0000 | [diff] [blame] | 883 | if (info->ext_id != 0 && info->ext_id != ext_jedec) |
Chen Gong | d0e8c47 | 2008-08-11 16:59:15 +0800 | [diff] [blame] | 884 | continue; |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 885 | return &m25p_ids[tmp]; |
Mike Frysinger | a3d3f73 | 2008-11-26 10:23:25 +0000 | [diff] [blame] | 886 | } |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 887 | } |
Kevin Cernekee | f0dff9b | 2010-10-30 21:11:02 -0700 | [diff] [blame] | 888 | dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec); |
Anton Vorontsov | 9d2c4f3 | 2010-06-22 20:57:42 +0400 | [diff] [blame] | 889 | return ERR_PTR(-ENODEV); |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 890 | } |
| 891 | |
| 892 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 893 | /* |
| 894 | * board specific setup should have ensured the SPI clock used here |
| 895 | * matches what the READ command supports, at least until this driver |
| 896 | * understands FAST_READ (for clocks over 25 MHz). |
| 897 | */ |
Bill Pemberton | 06f2551 | 2012-11-19 13:23:07 -0500 | [diff] [blame] | 898 | static int m25p_probe(struct spi_device *spi) |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 899 | { |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 900 | const struct spi_device_id *id = spi_get_device_id(spi); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 901 | struct flash_platform_data *data; |
| 902 | struct m25p *flash; |
| 903 | struct flash_info *info; |
| 904 | unsigned i; |
Dmitry Eremin-Solenikov | ea6a472 | 2011-05-30 01:02:20 +0400 | [diff] [blame] | 905 | struct mtd_part_parser_data ppdata; |
Marek Vasut | 12ad2be | 2012-09-24 03:39:39 +0200 | [diff] [blame] | 906 | struct device_node __maybe_unused *np = spi->dev.of_node; |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 907 | |
Shaohui Xie | 5f94913 | 2011-10-14 15:49:00 +0800 | [diff] [blame] | 908 | #ifdef CONFIG_MTD_OF_PARTS |
Marek Vasut | 12ad2be | 2012-09-24 03:39:39 +0200 | [diff] [blame] | 909 | if (!of_device_is_available(np)) |
Shaohui Xie | 5f94913 | 2011-10-14 15:49:00 +0800 | [diff] [blame] | 910 | return -ENODEV; |
| 911 | #endif |
| 912 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 913 | /* Platform data helps sort out which chip type we have, as |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 914 | * well as how this board partitions it. If we don't have |
| 915 | * a chip ID, try the JEDEC id commands; they'll work for most |
| 916 | * newer chips, even if we don't recognize the particular chip. |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 917 | */ |
| 918 | data = spi->dev.platform_data; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 919 | if (data && data->type) { |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 920 | const struct spi_device_id *plat_id; |
| 921 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 922 | for (i = 0; i < ARRAY_SIZE(m25p_ids) - 1; i++) { |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 923 | plat_id = &m25p_ids[i]; |
| 924 | if (strcmp(data->type, plat_id->name)) |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 925 | continue; |
| 926 | break; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 927 | } |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 928 | |
Dan Carpenter | f78ec6b | 2010-08-12 09:58:27 +0200 | [diff] [blame] | 929 | if (i < ARRAY_SIZE(m25p_ids) - 1) |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 930 | id = plat_id; |
| 931 | else |
| 932 | dev_warn(&spi->dev, "unrecognized id %s\n", data->type); |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 933 | } |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 934 | |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 935 | info = (void *)id->driver_data; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 936 | |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 937 | if (info->jedec_id) { |
| 938 | const struct spi_device_id *jid; |
| 939 | |
| 940 | jid = jedec_probe(spi); |
Anton Vorontsov | 9d2c4f3 | 2010-06-22 20:57:42 +0400 | [diff] [blame] | 941 | if (IS_ERR(jid)) { |
| 942 | return PTR_ERR(jid); |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 943 | } else if (jid != id) { |
| 944 | /* |
| 945 | * JEDEC knows better, so overwrite platform ID. We |
| 946 | * can't trust partitions any longer, but we'll let |
| 947 | * mtd apply them anyway, since some partitions may be |
| 948 | * marked read-only, and we don't want to lose that |
| 949 | * information, even if it's not 100% accurate. |
| 950 | */ |
| 951 | dev_warn(&spi->dev, "found %s, expected %s\n", |
| 952 | jid->name, id->name); |
| 953 | id = jid; |
| 954 | info = (void *)jid->driver_data; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 955 | } |
Anton Vorontsov | 18c6182 | 2009-10-12 20:24:38 +0400 | [diff] [blame] | 956 | } |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 957 | |
Christoph Lameter | e94b176 | 2006-12-06 20:33:17 -0800 | [diff] [blame] | 958 | flash = kzalloc(sizeof *flash, GFP_KERNEL); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 959 | if (!flash) |
| 960 | return -ENOMEM; |
Marek Vasut | 12ad2be | 2012-09-24 03:39:39 +0200 | [diff] [blame] | 961 | 	/* fast_read is only decided further below, so always reserve its dummy byte */
| 962 | 	flash->command = kmalloc(MAX_CMD_SIZE + 1, GFP_KERNEL);
Johannes Stezenbach | 61c3506 | 2009-10-28 14:21:37 +0100 | [diff] [blame] | 963 | if (!flash->command) { |
| 964 | kfree(flash); |
| 965 | return -ENOMEM; |
| 966 | } |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 967 | |
| 968 | flash->spi = spi; |
David Brownell | 7d5230e | 2007-06-24 15:09:13 -0700 | [diff] [blame] | 969 | mutex_init(&flash->lock); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 970 | dev_set_drvdata(&spi->dev, flash); |
| 971 | |
Michael Hennerich | 7228982 | 2008-07-03 23:54:42 -0700 | [diff] [blame] | 972 | /* |
Gabor Juhos | f80e521 | 2010-08-05 16:58:36 +0200 | [diff] [blame] | 973 |  * Atmel, SST and Intel/Numonyx serial flash tend to power up
Graf Yang | ea60658a | 2009-09-24 15:46:22 -0400 | [diff] [blame] | 974 |  * with the software protection bits set, so clear them here.
Michael Hennerich | 7228982 | 2008-07-03 23:54:42 -0700 | [diff] [blame] | 975 | */ |
| 976 | |
Kevin Cernekee | aa08465 | 2011-05-08 10:48:00 -0700 | [diff] [blame] | 977 | if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ATMEL || |
| 978 | JEDEC_MFR(info->jedec_id) == CFI_MFR_INTEL || |
| 979 | JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) { |
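		/* clearing the status register drops the BP0..BP2 block-protect bits (and SRWD) */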
Michael Hennerich | 7228982 | 2008-07-03 23:54:42 -0700 | [diff] [blame] | 980 | write_enable(flash); |
| 981 | write_sr(flash, 0); |
| 982 | } |
| 983 | |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 984 | if (data && data->name) |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 985 | flash->mtd.name = data->name; |
| 986 | else |
Kay Sievers | 160bbab | 2008-12-23 10:00:14 +0000 | [diff] [blame] | 987 | flash->mtd.name = dev_name(&spi->dev); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 988 | |
| 989 | flash->mtd.type = MTD_NORFLASH; |
Artem B. Bityutskiy | 783ed81 | 2006-06-14 19:53:44 +0400 | [diff] [blame] | 990 | flash->mtd.writesize = 1; |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 991 | flash->mtd.flags = MTD_CAP_NORFLASH; |
| 992 | flash->mtd.size = info->sector_size * info->n_sectors; |
Artem Bityutskiy | 3c3c10b | 2012-01-30 14:58:32 +0200 | [diff] [blame] | 993 | flash->mtd._erase = m25p80_erase; |
| 994 | flash->mtd._read = m25p80_read; |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 995 | |
Austin Boyle | 972e1b7 | 2013-01-04 13:02:28 +1300 | [diff] [blame] | 996 | /* flash protection support for STmicro chips */ |
| 997 | if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) { |
| 998 | flash->mtd._lock = m25p80_lock; |
| 999 | flash->mtd._unlock = m25p80_unlock; |
| 1000 | } |
| 1001 | |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 1002 | /* sst flash chips use AAI word program */ |
Kevin Cernekee | aa08465 | 2011-05-08 10:48:00 -0700 | [diff] [blame] | 1003 | if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) |
Artem Bityutskiy | 3c3c10b | 2012-01-30 14:58:32 +0200 | [diff] [blame] | 1004 | flash->mtd._write = sst_write; |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 1005 | else |
Artem Bityutskiy | 3c3c10b | 2012-01-30 14:58:32 +0200 | [diff] [blame] | 1006 | flash->mtd._write = m25p80_write; |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1007 | |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1008 | /* prefer "small sector" erase if possible */ |
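	/* (4 KiB erases give finer-grained MTD eraseblocks, at the cost of slower bulk erases) */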
| 1009 | if (info->flags & SECT_4K) { |
| 1010 | flash->erase_opcode = OPCODE_BE_4K; |
| 1011 | flash->mtd.erasesize = 4096; |
| 1012 | } else { |
| 1013 | flash->erase_opcode = OPCODE_SE; |
| 1014 | flash->mtd.erasesize = info->sector_size; |
| 1015 | } |
| 1016 | |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 1017 | if (info->flags & M25P_NO_ERASE) |
| 1018 | flash->mtd.flags |= MTD_NO_ERASE; |
David Brownell | 87f39f0 | 2009-03-26 00:42:50 -0700 | [diff] [blame] | 1019 | |
Dmitry Eremin-Solenikov | ea6a472 | 2011-05-30 01:02:20 +0400 | [diff] [blame] | 1020 | ppdata.of_node = spi->dev.of_node; |
Artem Bityutskiy | d85316a | 2008-12-18 14:10:05 +0200 | [diff] [blame] | 1021 | flash->mtd.dev.parent = &spi->dev; |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 1022 | flash->page_size = info->page_size; |
Brian Norris | b54f47c | 2012-01-31 00:06:03 -0800 | [diff] [blame] | 1023 | flash->mtd.writebufsize = flash->page_size; |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 1024 | |
Marek Vasut | 12ad2be | 2012-09-24 03:39:39 +0200 | [diff] [blame] | 1025 | flash->fast_read = false; |
| 1026 | #ifdef CONFIG_OF |
| 1027 | if (np && of_property_read_bool(np, "m25p,fast-read")) |
| 1028 | flash->fast_read = true; |
| 1029 | #endif |
| 1030 | |
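	/* the Kconfig option forces fast reads even when the DT property is absent */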
| 1031 | #ifdef CONFIG_M25PXX_USE_FAST_READ |
| 1032 | flash->fast_read = true; |
| 1033 | #endif |
| 1034 | |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 1035 | if (info->addr_width) |
| 1036 | flash->addr_width = info->addr_width; |
| 1037 | else { |
| 1038 | /* enable 4-byte addressing if the device exceeds 16MiB */ |
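		/* (a 3-byte address reaches at most 2^24 bytes = 16 MiB) */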
| 1039 | if (flash->mtd.size > 0x1000000) { |
| 1040 | flash->addr_width = 4; |
Kevin Cernekee | baa9ae3 | 2011-05-08 10:48:01 -0700 | [diff] [blame] | 1041 | set_4byte(flash, info->jedec_id, 1); |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 1042 | } else |
| 1043 | flash->addr_width = 3; |
| 1044 | } |
Artem Bityutskiy | d85316a | 2008-12-18 14:10:05 +0200 | [diff] [blame] | 1045 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 1046 | dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name, |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1047 | (long long)flash->mtd.size >> 10); |
| 1048 | |
Brian Norris | 289c052 | 2011-07-19 10:06:09 -0700 | [diff] [blame] | 1049 | pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) " |
David Woodhouse | 02d087d | 2007-06-28 22:38:38 +0100 | [diff] [blame] | 1050 | ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1051 | flash->mtd.name, |
Artem Bityutskiy | d85316a | 2008-12-18 14:10:05 +0200 | [diff] [blame] | 1052 | (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1053 | flash->mtd.erasesize, flash->mtd.erasesize / 1024, |
| 1054 | flash->mtd.numeraseregions); |
| 1055 | |
| 1056 | if (flash->mtd.numeraseregions) |
| 1057 | for (i = 0; i < flash->mtd.numeraseregions; i++) |
Brian Norris | 289c052 | 2011-07-19 10:06:09 -0700 | [diff] [blame] | 1058 | pr_debug("mtd.eraseregions[%d] = { .offset = 0x%llx, " |
David Woodhouse | 02d087d | 2007-06-28 22:38:38 +0100 | [diff] [blame] | 1059 | ".erasesize = 0x%.8x (%uKiB), " |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1060 | ".numblocks = %d }\n", |
Artem Bityutskiy | d85316a | 2008-12-18 14:10:05 +0200 | [diff] [blame] | 1061 | i, (long long)flash->mtd.eraseregions[i].offset, |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1062 | flash->mtd.eraseregions[i].erasesize, |
| 1063 | flash->mtd.eraseregions[i].erasesize / 1024, |
| 1064 | flash->mtd.eraseregions[i].numblocks); |
| 1065 | |
| 1066 | |
| 1067 | 	/* Partitions should match sector boundaries, and it may be wise to
| 1068 | 	 * use read-only partitions for sectors write-protected via BP2..BP0.
| 1069 | 	 */
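	/*
	 * Illustration only, not from the original driver: a made-up platform
	 * partition table that honours 64 KiB sector boundaries and keeps the
	 * boot area read-only (MTD_WRITEABLE in .mask_flags is masked *off*,
	 * so that partition becomes read-only):
	 *
	 *	static struct mtd_partition board_parts[] = {
	 *		{
	 *			.name		= "bootloader",
	 *			.offset		= 0,
	 *			.size		= 4 * 64 * 1024,
	 *			.mask_flags	= MTD_WRITEABLE,
	 *		}, {
	 *			.name		= "filesystem",
	 *			.offset		= MTDPART_OFS_APPEND,
	 *			.size		= MTDPART_SIZ_FULL,
	 *		},
	 *	};
	 *
	 * handed to this driver through flash_platform_data .parts/.nr_parts.
	 */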
Dmitry Eremin-Solenikov | 871770b | 2011-06-02 17:59:16 +0400 | [diff] [blame] | 1070 | return mtd_device_parse_register(&flash->mtd, NULL, &ppdata, |
| 1071 | data ? data->parts : NULL, |
| 1072 | data ? data->nr_parts : 0); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1073 | } |
| 1074 | |
| 1075 | |
Bill Pemberton | 810b7e0 | 2012-11-19 13:26:04 -0500 | [diff] [blame] | 1076 | static int m25p_remove(struct spi_device *spi) |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1077 | { |
| 1078 | struct m25p *flash = dev_get_drvdata(&spi->dev); |
| 1079 | int status; |
| 1080 | |
| 1081 | /* Clean up MTD stuff. */ |
Jamie Iles | ba52f3a | 2011-05-23 10:22:57 +0100 | [diff] [blame] | 1082 | status = mtd_device_unregister(&flash->mtd); |
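	/* free only on success; if the MTD is still in use, leaking beats freeing live data */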
Johannes Stezenbach | 61c3506 | 2009-10-28 14:21:37 +0100 | [diff] [blame] | 1083 | if (status == 0) { |
| 1084 | kfree(flash->command); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1085 | kfree(flash); |
Johannes Stezenbach | 61c3506 | 2009-10-28 14:21:37 +0100 | [diff] [blame] | 1086 | } |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1087 | return 0; |
| 1088 | } |
| 1089 | |
| 1090 | |
| 1091 | static struct spi_driver m25p80_driver = { |
| 1092 | .driver = { |
| 1093 | .name = "m25p80", |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1094 | .owner = THIS_MODULE, |
| 1095 | }, |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 1096 | .id_table = m25p_ids, |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1097 | .probe = m25p_probe, |
Bill Pemberton | 5153b88 | 2012-11-19 13:21:24 -0500 | [diff] [blame] | 1098 | .remove = m25p_remove, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 1099 | |
| 1100 | /* REVISIT: many of these chips have deep power-down modes, which |
| 1101 | * should clearly be entered on suspend() to minimize power use. |
| 1102 | * And also when they're otherwise idle... |
| 1103 | */ |
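	/* (For most of these parts that would mean issuing deep power-down,
	 * opcode 0xb9, from a suspend hook and release-from-deep-power-down,
	 * opcode 0xab, on resume; noted here only as a sketch, not implemented.)
	 */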
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1104 | }; |
| 1105 | |
Axel Lin | c9d1b75 | 2012-01-27 15:45:20 +0800 | [diff] [blame] | 1106 | module_spi_driver(m25p80_driver); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1107 | |
| 1108 | MODULE_LICENSE("GPL"); |
| 1109 | MODULE_AUTHOR("Mike Lavender"); |
| 1110 | MODULE_DESCRIPTION("MTD SPI driver for ST M25Pxx flash chips"); |