Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1 | /* |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 2 | * MTD SPI driver for ST M25Pxx (and similar) serial flash chips |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 3 | * |
| 4 | * Author: Mike Lavender, mike@steroidmicros.com |
| 5 | * |
| 6 | * Copyright (c) 2005, Intec Automation Inc. |
| 7 | * |
| 8 | * Some parts are based on lart.c by Abraham Van Der Merwe |
| 9 | * |
| 10 | * Cleaned up and generalized based on mtd_dataflash.c |
| 11 | * |
| 12 | * This code is free software; you can redistribute it and/or modify |
| 13 | * it under the terms of the GNU General Public License version 2 as |
| 14 | * published by the Free Software Foundation. |
| 15 | * |
| 16 | */ |
| 17 | |
| 18 | #include <linux/init.h> |
Anton Vorontsov | 9d2c4f3 | 2010-06-22 20:57:42 +0400 | [diff] [blame] | 19 | #include <linux/err.h> |
| 20 | #include <linux/errno.h> |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 21 | #include <linux/module.h> |
| 22 | #include <linux/device.h> |
| 23 | #include <linux/interrupt.h> |
David Brownell | 7d5230e | 2007-06-24 15:09:13 -0700 | [diff] [blame] | 24 | #include <linux/mutex.h> |
Artem Bityutskiy | d85316a | 2008-12-18 14:10:05 +0200 | [diff] [blame] | 25 | #include <linux/math64.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 26 | #include <linux/slab.h> |
Alexey Dobriyan | d43c36d | 2009-10-07 17:09:06 +0400 | [diff] [blame] | 27 | #include <linux/sched.h> |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 28 | #include <linux/mod_devicetable.h> |
David Brownell | 7d5230e | 2007-06-24 15:09:13 -0700 | [diff] [blame] | 29 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 30 | #include <linux/mtd/mtd.h> |
| 31 | #include <linux/mtd/partitions.h> |
David Brownell | 7d5230e | 2007-06-24 15:09:13 -0700 | [diff] [blame] | 32 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 33 | #include <linux/spi/spi.h> |
| 34 | #include <linux/spi/flash.h> |
| 35 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 36 | /* Flash opcodes. */ |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 37 | #define OPCODE_WREN 0x06 /* Write enable */ |
| 38 | #define OPCODE_RDSR 0x05 /* Read status register */ |
Michael Hennerich | 7228982 | 2008-07-03 23:54:42 -0700 | [diff] [blame] | 39 | #define OPCODE_WRSR 0x01 /* Write status register 1 byte */ |
Bryan Wu | 2230b76 | 2008-04-25 12:07:32 +0800 | [diff] [blame] | 40 | #define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */ |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 41 | #define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */ |
| 42 | #define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */ |
Chen Gong | 7854643 | 2008-11-26 10:23:57 +0000 | [diff] [blame] | 43 | #define OPCODE_BE_4K 0x20 /* Erase 4KiB block */ |
David Woodhouse | 02d087d | 2007-06-28 22:38:38 +0100 | [diff] [blame] | 44 | #define OPCODE_BE_32K 0x52 /* Erase 32KiB block */ |
Chen Gong | 7854643 | 2008-11-26 10:23:57 +0000 | [diff] [blame] | 45 | #define OPCODE_CHIP_ERASE 0xc7 /* Erase whole flash chip */ |
David Woodhouse | 02d087d | 2007-06-28 22:38:38 +0100 | [diff] [blame] | 46 | #define OPCODE_SE 0xd8 /* Sector erase (usually 64KiB) */ |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 47 | #define OPCODE_RDID 0x9f /* Read JEDEC ID */ |
| 48 | |
Graf Yang | 49aac4a | 2009-06-15 08:23:41 +0000 | [diff] [blame] | 49 | /* Used for SST flashes only. */ |
| 50 | #define OPCODE_BP 0x02 /* Byte program */ |
| 51 | #define OPCODE_WRDI 0x04 /* Write disable */ |
| 52 | #define OPCODE_AAI_WP 0xad /* Auto address increment word program */ |
| 53 | |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 54 | /* Used for Macronix flashes only. */ |
| 55 | #define OPCODE_EN4B 0xb7 /* Enter 4-byte mode */ |
| 56 | #define OPCODE_EX4B 0xe9 /* Exit 4-byte mode */ |
| 57 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 58 | /* Status Register bits. */ |
| 59 | #define SR_WIP 1 /* Write in progress */ |
| 60 | #define SR_WEL 2 /* Write enable latch */ |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 61 | /* meaning of other SR_* bits may differ between vendors */ |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 62 | #define SR_BP0 4 /* Block protect 0 */ |
| 63 | #define SR_BP1 8 /* Block protect 1 */ |
| 64 | #define SR_BP2 0x10 /* Block protect 2 */ |
| 65 | #define SR_SRWD 0x80 /* SR write protect */ |
| 66 | |
| 67 | /* Define max times to check status register before we give up. */ |
Steven A. Falco | 89bb871 | 2009-06-26 12:42:47 -0400 | [diff] [blame] | 68 | #define MAX_READY_WAIT_JIFFIES (40 * HZ) /* M25P16 specs 40s max chip erase */ |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 69 | #define MAX_CMD_SIZE 5 |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 70 | |
Bryan Wu | 2230b76 | 2008-04-25 12:07:32 +0800 | [diff] [blame] | 71 | #ifdef CONFIG_M25PXX_USE_FAST_READ |
| 72 | #define OPCODE_READ OPCODE_FAST_READ |
| 73 | #define FAST_READ_DUMMY_BYTE 1 |
| 74 | #else |
| 75 | #define OPCODE_READ OPCODE_NORM_READ |
| 76 | #define FAST_READ_DUMMY_BYTE 0 |
| 77 | #endif |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 78 | |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 79 | /****************************************************************************/ |
| 80 | |
| 81 | struct m25p { |
| 82 | struct spi_device *spi; |
David Brownell | 7d5230e | 2007-06-24 15:09:13 -0700 | [diff] [blame] | 83 | struct mutex lock; |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 84 | struct mtd_info mtd; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 85 | unsigned partitioned:1; |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 86 | u16 page_size; |
| 87 | u16 addr_width; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 88 | u8 erase_opcode; |
Johannes Stezenbach | 61c3506 | 2009-10-28 14:21:37 +0100 | [diff] [blame] | 89 | u8 *command; |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 90 | }; |
| 91 | |
/* Map the embedded mtd_info back to its containing struct m25p. */
static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
{
	return container_of(mtd, struct m25p, mtd);
}
| 96 | |
| 97 | /****************************************************************************/ |
| 98 | |
| 99 | /* |
| 100 | * Internal helper functions |
| 101 | */ |
| 102 | |
| 103 | /* |
| 104 | * Read the status register, returning its value in the location |
| 105 | * Return the status register value. |
| 106 | * Returns negative if error occurred. |
| 107 | */ |
| 108 | static int read_sr(struct m25p *flash) |
| 109 | { |
| 110 | ssize_t retval; |
| 111 | u8 code = OPCODE_RDSR; |
| 112 | u8 val; |
| 113 | |
| 114 | retval = spi_write_then_read(flash->spi, &code, 1, &val, 1); |
| 115 | |
| 116 | if (retval < 0) { |
| 117 | dev_err(&flash->spi->dev, "error %d reading SR\n", |
| 118 | (int) retval); |
| 119 | return retval; |
| 120 | } |
| 121 | |
| 122 | return val; |
| 123 | } |
| 124 | |
Michael Hennerich | 7228982 | 2008-07-03 23:54:42 -0700 | [diff] [blame] | 125 | /* |
| 126 | * Write status register 1 byte |
| 127 | * Returns negative if error occurred. |
| 128 | */ |
| 129 | static int write_sr(struct m25p *flash, u8 val) |
| 130 | { |
| 131 | flash->command[0] = OPCODE_WRSR; |
| 132 | flash->command[1] = val; |
| 133 | |
| 134 | return spi_write(flash->spi, flash->command, 2); |
| 135 | } |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 136 | |
/*
 * Set write enable latch with Write Enable command.
 * Must precede every program/erase/write-SR operation (see callers).
 * Returns negative if error occurred.
 */
static inline int write_enable(struct m25p *flash)
{
	u8	code = OPCODE_WREN;

	return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
}
| 147 | |
/*
 * Send the Write Disable instruction to the chip (used by sst_write()
 * to terminate auto-address-increment programming).
 * Returns negative if error occurred.
 */
static inline int write_disable(struct m25p *flash)
{
	u8	code = OPCODE_WRDI;

	return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
}
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 157 | |
/*
 * Enable/disable 4-byte addressing mode via the Macronix EN4B/EX4B
 * opcodes (see OPCODE_EN4B/OPCODE_EX4B above).
 * Returns negative if error occurred.
 */
static inline int set_4byte(struct m25p *flash, int enable)
{
	u8	code = enable ? OPCODE_EN4B : OPCODE_EX4B;

	return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
}
| 167 | |
| 168 | /* |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 169 | * Service routine to read status register until ready, or timeout occurs. |
| 170 | * Returns non-zero if error. |
| 171 | */ |
| 172 | static int wait_till_ready(struct m25p *flash) |
| 173 | { |
Peter Horton | cd1a6de | 2009-05-08 13:51:53 +0100 | [diff] [blame] | 174 | unsigned long deadline; |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 175 | int sr; |
| 176 | |
Peter Horton | cd1a6de | 2009-05-08 13:51:53 +0100 | [diff] [blame] | 177 | deadline = jiffies + MAX_READY_WAIT_JIFFIES; |
| 178 | |
| 179 | do { |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 180 | if ((sr = read_sr(flash)) < 0) |
| 181 | break; |
| 182 | else if (!(sr & SR_WIP)) |
| 183 | return 0; |
| 184 | |
Peter Horton | cd1a6de | 2009-05-08 13:51:53 +0100 | [diff] [blame] | 185 | cond_resched(); |
| 186 | |
| 187 | } while (!time_after_eq(jiffies, deadline)); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 188 | |
| 189 | return 1; |
| 190 | } |
| 191 | |
/*
 * Erase the whole flash memory (OPCODE_CHIP_ERASE).
 * Caller must hold flash->lock (see m25p80_erase()).
 *
 * Returns 0 if successful, non-zero otherwise.
 */
static int erase_chip(struct m25p *flash)
{
	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n",
	      dev_name(&flash->spi->dev), __func__,
	      (long long)(flash->mtd.size >> 10));

	/* Wait until finished previous write command. */
	if (wait_till_ready(flash))
		return 1;

	/* Send write enable, then erase commands. */
	write_enable(flash);

	/* Set up command buffer. */
	flash->command[0] = OPCODE_CHIP_ERASE;

	/* NOTE(review): the spi_write() result is ignored, so a transfer
	 * failure is still reported as success; it only surfaces via the
	 * next wait_till_ready(). */
	spi_write(flash->spi, flash->command, 1);

	return 0;
}
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 217 | |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 218 | static void m25p_addr2cmd(struct m25p *flash, unsigned int addr, u8 *cmd) |
| 219 | { |
| 220 | /* opcode is in cmd[0] */ |
| 221 | cmd[1] = addr >> (flash->addr_width * 8 - 8); |
| 222 | cmd[2] = addr >> (flash->addr_width * 8 - 16); |
| 223 | cmd[3] = addr >> (flash->addr_width * 8 - 24); |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 224 | cmd[4] = addr >> (flash->addr_width * 8 - 32); |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 225 | } |
| 226 | |
| 227 | static int m25p_cmdsz(struct m25p *flash) |
| 228 | { |
| 229 | return 1 + flash->addr_width; |
| 230 | } |
| 231 | |
/*
 * Erase one sector of flash memory at offset ``offset'' which is any
 * address within the sector which should be erased.
 * Caller must hold flash->lock (see m25p80_erase()).
 *
 * Returns 0 if successful, non-zero otherwise.
 */
static int erase_sector(struct m25p *flash, u32 offset)
{
	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n",
			dev_name(&flash->spi->dev), __func__,
			flash->mtd.erasesize / 1024, offset);

	/* Wait until finished previous write command. */
	if (wait_till_ready(flash))
		return 1;

	/* Send write enable, then erase commands. */
	write_enable(flash);

	/* Set up command buffer. */
	flash->command[0] = flash->erase_opcode;
	m25p_addr2cmd(flash, offset, flash->command);

	/* NOTE(review): spi_write() result is ignored, as in erase_chip(). */
	spi_write(flash->spi, flash->command, m25p_cmdsz(flash));

	return 0;
}
| 259 | |
| 260 | /****************************************************************************/ |
| 261 | |
| 262 | /* |
| 263 | * MTD implementation |
| 264 | */ |
| 265 | |
/*
 * Erase an address range on the flash chip.  The address range may extend
 * one or more erase sectors.  Return an error if there is a problem erasing.
 */
static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	u32 addr,len;
	uint32_t rem;

	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n",
			dev_name(&flash->spi->dev), __func__, "at",
			(long long)instr->addr, (long long)instr->len);

	/* sanity checks */
	if (instr->addr + instr->len > flash->mtd.size)
		return -EINVAL;
	/* length must be a whole number of erase sectors */
	div_u64_rem(instr->len, mtd->erasesize, &rem);
	if (rem)
		return -EINVAL;

	/* NOTE(review): addr/len are u32, so the 64-bit erase_info fields
	 * are truncated here; fine while supported parts stay below 4GiB. */
	addr = instr->addr;
	len = instr->len;

	mutex_lock(&flash->lock);

	/* whole-chip erase? */
	if (len == flash->mtd.size) {
		if (erase_chip(flash)) {
			instr->state = MTD_ERASE_FAILED;
			mutex_unlock(&flash->lock);
			return -EIO;
		}

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using OPCODE_SE instead of OPCODE_BE_4K.  We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else {
		while (len) {
			if (erase_sector(flash, addr)) {
				instr->state = MTD_ERASE_FAILED;
				mutex_unlock(&flash->lock);
				return -EIO;
			}

			addr += mtd->erasesize;
			len -= mtd->erasesize;
		}
	}

	mutex_unlock(&flash->lock);

	/* report completion to the MTD layer via the standard callback */
	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
| 326 | |
/*
 * Read an address range from the flash chip.  The address range
 * may be any size provided it is within the physical boundaries.
 * Returns 0 on success (bytes read reported via *retlen), -EINVAL on a
 * range error, or 1 if the chip never became ready.
 */
static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
	size_t *retlen, u_char *buf)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	struct spi_transfer t[2];
	struct spi_message m;

	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
			dev_name(&flash->spi->dev), __func__, "from",
			(u32)from, len);

	/* sanity checks */
	if (!len)
		return 0;

	if (from + len > flash->mtd.size)
		return -EINVAL;

	/* one message, two transfers: command out, then data in */
	spi_message_init(&m);
	memset(t, 0, (sizeof t));

	/* NOTE:
	 * OPCODE_FAST_READ (if available) is faster.
	 * Should add 1 byte DUMMY_BYTE.
	 */
	t[0].tx_buf = flash->command;
	t[0].len = m25p_cmdsz(flash) + FAST_READ_DUMMY_BYTE;
	spi_message_add_tail(&t[0], &m);

	t[1].rx_buf = buf;
	t[1].len = len;
	spi_message_add_tail(&t[1], &m);

	/* Byte count starts at zero. */
	*retlen = 0;

	mutex_lock(&flash->lock);

	/* Wait till previous write/erase is done. */
	if (wait_till_ready(flash)) {
		/* REVISIT status return?? */
		mutex_unlock(&flash->lock);
		return 1;
	}

	/* FIXME switch to OPCODE_FAST_READ.  It's required for higher
	 * clocks; and at this writing, every chip this driver handles
	 * supports that opcode.
	 */

	/* Set up the write data buffer. */
	flash->command[0] = OPCODE_READ;
	m25p_addr2cmd(flash, from, flash->command);

	spi_sync(flash->spi, &m);

	/* data received = total transferred minus command (+ dummy) bytes */
	*retlen = m.actual_length - m25p_cmdsz(flash) - FAST_READ_DUMMY_BYTE;

	mutex_unlock(&flash->lock);

	return 0;
}
| 393 | |
/*
 * Write an address range to the flash chip.  Data must be written in
 * FLASH_PAGESIZE chunks.  The address range may be any size provided
 * it is within the physical boundaries.
 * Returns 0 on success (bytes written reported via *retlen), -EINVAL
 * on a range error, or 1 if the chip never became ready.
 */
static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	u32 page_offset, page_size;
	struct spi_transfer t[2];
	struct spi_message m;

	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
			dev_name(&flash->spi->dev), __func__, "to",
			(u32)to, len);

	*retlen = 0;

	/* sanity checks */
	if (!len)
		return(0);

	if (to + len > flash->mtd.size)
		return -EINVAL;

	/* one message per page program: command out, then data out */
	spi_message_init(&m);
	memset(t, 0, (sizeof t));

	t[0].tx_buf = flash->command;
	t[0].len = m25p_cmdsz(flash);
	spi_message_add_tail(&t[0], &m);

	t[1].tx_buf = buf;
	spi_message_add_tail(&t[1], &m);

	mutex_lock(&flash->lock);

	/* Wait until finished previous write command. */
	if (wait_till_ready(flash)) {
		mutex_unlock(&flash->lock);
		return 1;
	}

	write_enable(flash);

	/* Set up the opcode in the write buffer. */
	flash->command[0] = OPCODE_PP;
	m25p_addr2cmd(flash, to, flash->command);

	/* page_size is assumed to be a power of two (mask arithmetic) */
	page_offset = to & (flash->page_size - 1);

	/* do all the bytes fit onto one page? */
	if (page_offset + len <= flash->page_size) {
		t[1].len = len;

		spi_sync(flash->spi, &m);

		*retlen = m.actual_length - m25p_cmdsz(flash);
	} else {
		u32 i;

		/* the size of data remaining on the first page */
		page_size = flash->page_size - page_offset;

		t[1].len = page_size;
		spi_sync(flash->spi, &m);

		*retlen = m.actual_length - m25p_cmdsz(flash);

		/* write everything in flash->page_size chunks */
		for (i = page_size; i < len; i += page_size) {
			page_size = len - i;
			if (page_size > flash->page_size)
				page_size = flash->page_size;

			/* write the next page to flash */
			m25p_addr2cmd(flash, to + i, flash->command);

			t[1].tx_buf = buf + i;
			t[1].len = page_size;

			/* NOTE(review): this wait_till_ready() result is
			 * ignored; a timeout would let the next program
			 * start while the chip is still busy. */
			wait_till_ready(flash);

			write_enable(flash);

			spi_sync(flash->spi, &m);

			*retlen += m.actual_length - m25p_cmdsz(flash);
		}
	}

	mutex_unlock(&flash->lock);

	return 0;
}
| 490 | |
/*
 * Write an address range to SST flashes, which use byte-program
 * (OPCODE_BP) and auto-address-increment word-program (OPCODE_AAI_WP)
 * instead of page program: an optional leading byte to reach an even
 * address, then two bytes per AAI cycle, then an optional trailing byte.
 *
 * Returns 0 on success (bytes written via *retlen), -EINVAL on a range
 * error, or the non-zero wait_till_ready() result on timeout.
 */
static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct m25p *flash = mtd_to_m25p(mtd);
	struct spi_transfer t[2];
	struct spi_message m;
	size_t actual;
	int cmd_sz, ret;

	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
			dev_name(&flash->spi->dev), __func__, "to",
			(u32)to, len);

	*retlen = 0;

	/* sanity checks */
	if (!len)
		return 0;

	if (to + len > flash->mtd.size)
		return -EINVAL;

	/* one message per program cycle: command out, then data out */
	spi_message_init(&m);
	memset(t, 0, (sizeof t));

	t[0].tx_buf = flash->command;
	t[0].len = m25p_cmdsz(flash);
	spi_message_add_tail(&t[0], &m);

	t[1].tx_buf = buf;
	spi_message_add_tail(&t[1], &m);

	mutex_lock(&flash->lock);

	/* Wait until finished previous write command. */
	ret = wait_till_ready(flash);
	if (ret)
		goto time_out;

	write_enable(flash);

	actual = to % 2;
	/* Start write from odd address. */
	if (actual) {
		flash->command[0] = OPCODE_BP;
		m25p_addr2cmd(flash, to, flash->command);

		/* write one byte. */
		t[1].len = 1;
		spi_sync(flash->spi, &m);
		ret = wait_till_ready(flash);
		if (ret)
			goto time_out;
		*retlen += m.actual_length - m25p_cmdsz(flash);
	}
	to += actual;

	flash->command[0] = OPCODE_AAI_WP;
	m25p_addr2cmd(flash, to, flash->command);

	/* Write out most of the data here. */
	cmd_sz = m25p_cmdsz(flash);
	for (; actual < len - 1; actual += 2) {
		/* After the first cycle the chip auto-increments its
		 * internal address, so only the opcode byte is resent
		 * (cmd_sz drops to 1 below). */
		t[0].len = cmd_sz;
		/* write two bytes. */
		t[1].len = 2;
		t[1].tx_buf = buf + actual;

		spi_sync(flash->spi, &m);
		ret = wait_till_ready(flash);
		if (ret)
			goto time_out;
		*retlen += m.actual_length - cmd_sz;
		cmd_sz = 1;
		to += 2;
	}
	/* terminate the AAI sequence before any other command */
	write_disable(flash);
	ret = wait_till_ready(flash);
	if (ret)
		goto time_out;

	/* Write out trailing byte if it exists. */
	if (actual != len) {
		write_enable(flash);
		flash->command[0] = OPCODE_BP;
		m25p_addr2cmd(flash, to, flash->command);
		t[0].len = m25p_cmdsz(flash);
		t[1].len = 1;
		t[1].tx_buf = buf + actual;

		spi_sync(flash->spi, &m);
		ret = wait_till_ready(flash);
		if (ret)
			goto time_out;
		*retlen += m.actual_length - m25p_cmdsz(flash);
		write_disable(flash);
	}

time_out:
	mutex_unlock(&flash->lock);
	return ret;
}
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 593 | |
| 594 | /****************************************************************************/ |
| 595 | |
| 596 | /* |
| 597 | * SPI device driver setup and teardown |
| 598 | */ |
| 599 | |
/* Static description of one supported flash part (see chip table below). */
struct flash_info {
	/* JEDEC id zero means "no ID" (most older chips); otherwise it has
	 * a high byte of zero plus three data bytes: the manufacturer id,
	 * then a two byte device id.
	 */
	u32		jedec_id;
	u16             ext_id;		/* extended device id, if any */

	/* The size listed here is what works with OPCODE_SE, which isn't
	 * necessarily called a "sector" by the vendor.
	 */
	unsigned	sector_size;
	u16		n_sectors;	/* total size = sector_size * n_sectors */

	u16		page_size;	/* page-program chunk size, in bytes */
	u16		addr_width;	/* address bytes per command */

	u16		flags;
#define	SECT_4K		0x01		/* OPCODE_BE_4K works uniformly */
#define	M25P_NO_ERASE	0x02		/* No erase command needed */
};
| 621 | |
/*
 * Build the driver_data value for a JEDEC-identified chip: the address of
 * an anonymous flash_info compound literal, cast to kernel_ulong_t as
 * spi_device_id.driver_data requires.  page_size defaults to 256 and
 * addr_width is left 0 so m25p_probe() derives it from the total size.
 */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
	((kernel_ulong_t)&(struct flash_info) {				\
		.jedec_id = (_jedec_id),				\
		.ext_id = (_ext_id),					\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = 256,					\
		.flags = (_flags),					\
	})
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 631 | |
/*
 * Like INFO(), but for Catalyst EEPROM-style parts with no JEDEC id:
 * page size and address width come from the datasheet, and M25P_NO_ERASE
 * marks them as directly overwritable (no erase cycle).
 */
#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width)	\
	((kernel_ulong_t)&(struct flash_info) {				\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = (_page_size),				\
		.addr_width = (_addr_width),				\
		.flags = M25P_NO_ERASE,					\
	})
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 640 | |
| 641 | /* NOTE: double check command sets and memory organization when you add |
| 642 | * more flash chips. This current list focusses on newer chips, which |
| 643 | * have been converging on command sets which including JEDEC ID. |
| 644 | */ |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 645 | static const struct spi_device_id m25p_ids[] = { |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 646 | /* Atmel -- some are (confusingly) marketed as "DataFlash" */ |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 647 | { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) }, |
| 648 | { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 649 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 650 | { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) }, |
| 651 | { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 652 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 653 | { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, |
| 654 | { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) }, |
| 655 | { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, |
Aleksandr Koltsoff | 8fffed8 | 2011-01-04 10:42:35 +0200 | [diff] [blame] | 656 | { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 657 | |
Gabor Juhos | 37a23c20 | 2011-01-25 11:20:26 +0100 | [diff] [blame^] | 658 | /* EON -- en25xxx */ |
| 659 | { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, |
Gabor Juhos | 60845e7 | 2010-08-04 21:14:25 +0200 | [diff] [blame] | 660 | { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, |
| 661 | { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, |
| 662 | |
Gabor Juhos | f80e521 | 2010-08-05 16:58:36 +0200 | [diff] [blame] | 663 | /* Intel/Numonyx -- xxxs33b */ |
| 664 | { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, |
| 665 | { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, |
| 666 | { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, |
| 667 | |
Lennert Buytenhek | ab1ff21 | 2009-05-20 13:07:11 +0200 | [diff] [blame] | 668 | /* Macronix */ |
Simon Guinot | df0094d | 2009-12-05 15:28:00 +0100 | [diff] [blame] | 669 | { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) }, |
Martin Michlmayr | 6175f4a | 2010-06-07 19:31:01 +0100 | [diff] [blame] | 670 | { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) }, |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 671 | { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) }, |
| 672 | { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) }, |
| 673 | { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, |
| 674 | { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, |
Kevin Cernekee | 4b7f742 | 2010-10-30 21:11:03 -0700 | [diff] [blame] | 675 | { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, |
Kevin Cernekee | ac622f5 | 2010-10-30 21:11:04 -0700 | [diff] [blame] | 676 | { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, |
Lennert Buytenhek | ab1ff21 | 2009-05-20 13:07:11 +0200 | [diff] [blame] | 677 | |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 678 | /* Spansion -- single (large) sector size only, at least |
| 679 | * for the chips listed here (without boot sectors). |
| 680 | */ |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 681 | { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, |
| 682 | { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) }, |
| 683 | { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) }, |
| 684 | { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) }, |
David Jander | d86fbdb | 2010-09-30 13:26:02 +0200 | [diff] [blame] | 685 | { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SECT_4K) }, |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 686 | { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) }, |
| 687 | { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, |
| 688 | { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, |
| 689 | { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, 0) }, |
| 690 | { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, 0) }, |
Gernot Hoyler | f2df1ae | 2010-09-02 17:27:20 +0200 | [diff] [blame] | 691 | { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K) }, |
| 692 | { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 693 | |
| 694 | /* SST -- large erase sizes are "overlays", "sectors" are 4K */ |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 695 | { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) }, |
| 696 | { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) }, |
| 697 | { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) }, |
| 698 | { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) }, |
| 699 | { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) }, |
| 700 | { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K) }, |
| 701 | { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) }, |
| 702 | { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 703 | |
| 704 | /* ST Microelectronics -- newer production may have feature updates */ |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 705 | { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) }, |
| 706 | { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) }, |
| 707 | { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) }, |
| 708 | { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) }, |
| 709 | { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) }, |
| 710 | { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) }, |
| 711 | { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) }, |
| 712 | { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) }, |
| 713 | { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 714 | |
Anton Vorontsov | f7b0009 | 2010-06-22 20:57:34 +0400 | [diff] [blame] | 715 | { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) }, |
| 716 | { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) }, |
| 717 | { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) }, |
| 718 | { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) }, |
| 719 | { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) }, |
| 720 | { "m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) }, |
| 721 | { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) }, |
| 722 | { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) }, |
| 723 | { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) }, |
| 724 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 725 | { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) }, |
| 726 | { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) }, |
| 727 | { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 728 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 729 | { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, |
| 730 | { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 731 | |
David Woodhouse | 02d087d | 2007-06-28 22:38:38 +0100 | [diff] [blame] | 732 | /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 733 | { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, |
| 734 | { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, |
| 735 | { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) }, |
| 736 | { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, |
| 737 | { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, |
| 738 | { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, |
Gabor Juhos | 0af18d2 | 2010-08-04 21:14:27 +0200 | [diff] [blame] | 739 | { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 740 | { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, |
Thierry Reding | d2ac467 | 2010-08-30 13:00:48 +0200 | [diff] [blame] | 741 | { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 742 | |
Anton Vorontsov | 837479d | 2009-10-12 20:24:40 +0400 | [diff] [blame] | 743 | /* Catalyst / On Semiconductor -- non-JEDEC */ |
| 744 | { "cat25c11", CAT25_INFO( 16, 8, 16, 1) }, |
| 745 | { "cat25c03", CAT25_INFO( 32, 8, 16, 2) }, |
| 746 | { "cat25c09", CAT25_INFO( 128, 8, 32, 2) }, |
| 747 | { "cat25c17", CAT25_INFO( 256, 8, 32, 2) }, |
| 748 | { "cat25128", CAT25_INFO(2048, 8, 64, 2) }, |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 749 | { }, |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 750 | }; |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 751 | MODULE_DEVICE_TABLE(spi, m25p_ids); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 752 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 753 | static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi) |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 754 | { |
| 755 | int tmp; |
| 756 | u8 code = OPCODE_RDID; |
Chen Gong | daa8473 | 2008-09-16 14:14:12 +0800 | [diff] [blame] | 757 | u8 id[5]; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 758 | u32 jedec; |
Chen Gong | d0e8c47 | 2008-08-11 16:59:15 +0800 | [diff] [blame] | 759 | u16 ext_jedec; |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 760 | struct flash_info *info; |
| 761 | |
| 762 | /* JEDEC also defines an optional "extended device information" |
| 763 | * string for after vendor-specific data, after the three bytes |
| 764 | * we use here. Supporting some chips might require using it. |
| 765 | */ |
Chen Gong | daa8473 | 2008-09-16 14:14:12 +0800 | [diff] [blame] | 766 | tmp = spi_write_then_read(spi, &code, 1, id, 5); |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 767 | if (tmp < 0) { |
| 768 | DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", |
Kay Sievers | 160bbab | 2008-12-23 10:00:14 +0000 | [diff] [blame] | 769 | dev_name(&spi->dev), tmp); |
Anton Vorontsov | 9d2c4f3 | 2010-06-22 20:57:42 +0400 | [diff] [blame] | 770 | return ERR_PTR(tmp); |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 771 | } |
| 772 | jedec = id[0]; |
| 773 | jedec = jedec << 8; |
| 774 | jedec |= id[1]; |
| 775 | jedec = jedec << 8; |
| 776 | jedec |= id[2]; |
| 777 | |
Chen Gong | d0e8c47 | 2008-08-11 16:59:15 +0800 | [diff] [blame] | 778 | ext_jedec = id[3] << 8 | id[4]; |
| 779 | |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 780 | for (tmp = 0; tmp < ARRAY_SIZE(m25p_ids) - 1; tmp++) { |
| 781 | info = (void *)m25p_ids[tmp].driver_data; |
Mike Frysinger | a3d3f73 | 2008-11-26 10:23:25 +0000 | [diff] [blame] | 782 | if (info->jedec_id == jedec) { |
Mike Frysinger | 9168ab8 | 2008-11-26 10:23:35 +0000 | [diff] [blame] | 783 | if (info->ext_id != 0 && info->ext_id != ext_jedec) |
Chen Gong | d0e8c47 | 2008-08-11 16:59:15 +0800 | [diff] [blame] | 784 | continue; |
Anton Vorontsov | b34bc03 | 2009-10-12 20:24:35 +0400 | [diff] [blame] | 785 | return &m25p_ids[tmp]; |
Mike Frysinger | a3d3f73 | 2008-11-26 10:23:25 +0000 | [diff] [blame] | 786 | } |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 787 | } |
Kevin Cernekee | f0dff9b | 2010-10-30 21:11:02 -0700 | [diff] [blame] | 788 | dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec); |
Anton Vorontsov | 9d2c4f3 | 2010-06-22 20:57:42 +0400 | [diff] [blame] | 789 | return ERR_PTR(-ENODEV); |
David Brownell | fa0a8c7 | 2007-06-24 15:12:35 -0700 | [diff] [blame] | 790 | } |
| 791 | |
| 792 | |
/*
 * board specific setup should have ensured the SPI clock used here
 * matches what the READ command supports, at least until this driver
 * understands FAST_READ (for clocks over 25 MHz).
 */
/*
 * Identify the chip (platform data name, then JEDEC RDID when the table
 * entry has a nonzero jedec_id), allocate driver state, unlock software
 * write protection where vendors power up with it set, fill in the mtd_info
 * callbacks/geometry, and register the device (with partitions if any
 * source -- cmdline, platform data, or device tree -- provides them).
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit m25p_probe(struct spi_device *spi)
{
	const struct spi_device_id	*id = spi_get_device_id(spi);
	struct flash_platform_data	*data;
	struct m25p			*flash;
	struct flash_info		*info;
	unsigned			i;

	/* Platform data helps sort out which chip type we have, as
	 * well as how this board partitions it.  If we don't have
	 * a chip ID, try the JEDEC id commands; they'll work for most
	 * newer chips, even if we don't recognize the particular chip.
	 */
	data = spi->dev.platform_data;
	if (data && data->type) {
		const struct spi_device_id *plat_id;

		for (i = 0; i < ARRAY_SIZE(m25p_ids) - 1; i++) {
			plat_id = &m25p_ids[i];
			if (strcmp(data->type, plat_id->name))
				continue;
			break;
		}

		/* i == ARRAY_SIZE - 1 means the loop fell off the table
		 * (only the sentinel left): keep the SPI-core id instead.
		 */
		if (i < ARRAY_SIZE(m25p_ids) - 1)
			id = plat_id;
		else
			dev_warn(&spi->dev, "unrecognized id %s\n", data->type);
	}

	info = (void *)id->driver_data;

	/* jedec_id == 0 marks "-nonjedec" and CAT25 parts: skip probing. */
	if (info->jedec_id) {
		const struct spi_device_id *jid;

		jid = jedec_probe(spi);
		if (IS_ERR(jid)) {
			return PTR_ERR(jid);
		} else if (jid != id) {
			/*
			 * JEDEC knows better, so overwrite platform ID. We
			 * can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(&spi->dev, "found %s, expected %s\n",
				 jid->name, id->name);
			id = jid;
			info = (void *)jid->driver_data;
		}
	}

	/* Driver state plus a separately-allocated command buffer. */
	flash = kzalloc(sizeof *flash, GFP_KERNEL);
	if (!flash)
		return -ENOMEM;
	flash->command = kmalloc(MAX_CMD_SIZE + FAST_READ_DUMMY_BYTE, GFP_KERNEL);
	if (!flash->command) {
		kfree(flash);
		return -ENOMEM;
	}

	flash->spi = spi;
	mutex_init(&flash->lock);
	dev_set_drvdata(&spi->dev, flash);

	/*
	 * Atmel, SST and Intel/Numonyx serial flash tend to power
	 * up with the software protection bits set
	 */

	/* Manufacturer byte is bits 23..16 of the JEDEC id. */
	if (info->jedec_id >> 16 == 0x1f ||
	    info->jedec_id >> 16 == 0x89 ||
	    info->jedec_id >> 16 == 0xbf) {
		write_enable(flash);
		write_sr(flash, 0);
	}

	if (data && data->name)
		flash->mtd.name = data->name;
	else
		flash->mtd.name = dev_name(&spi->dev);

	flash->mtd.type = MTD_NORFLASH;
	flash->mtd.writesize = 1;
	flash->mtd.flags = MTD_CAP_NORFLASH;
	flash->mtd.size = info->sector_size * info->n_sectors;
	flash->mtd.erase = m25p80_erase;
	flash->mtd.read = m25p80_read;

	/* sst flash chips use AAI word program */
	if (info->jedec_id >> 16 == 0xbf)
		flash->mtd.write = sst_write;
	else
		flash->mtd.write = m25p80_write;

	/* prefer "small sector" erase if possible */
	if (info->flags & SECT_4K) {
		flash->erase_opcode = OPCODE_BE_4K;
		flash->mtd.erasesize = 4096;
	} else {
		flash->erase_opcode = OPCODE_SE;
		flash->mtd.erasesize = info->sector_size;
	}

	if (info->flags & M25P_NO_ERASE)
		flash->mtd.flags |= MTD_NO_ERASE;

	flash->mtd.dev.parent = &spi->dev;
	flash->page_size = info->page_size;

	if (info->addr_width)
		flash->addr_width = info->addr_width;
	else {
		/* enable 4-byte addressing if the device exceeds 16MiB */
		if (flash->mtd.size > 0x1000000) {
			flash->addr_width = 4;
			set_4byte(flash, 1);
		} else
			flash->addr_width = 3;
	}

	dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
			(long long)flash->mtd.size >> 10);

	DEBUG(MTD_DEBUG_LEVEL2,
		"mtd .name = %s, .size = 0x%llx (%lldMiB) "
			".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		flash->mtd.name,
		(long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
		flash->mtd.erasesize, flash->mtd.erasesize / 1024,
		flash->mtd.numeraseregions);

	if (flash->mtd.numeraseregions)
		for (i = 0; i < flash->mtd.numeraseregions; i++)
			DEBUG(MTD_DEBUG_LEVEL2,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)flash->mtd.eraseregions[i].offset,
				flash->mtd.eraseregions[i].erasesize,
				flash->mtd.eraseregions[i].erasesize / 1024,
				flash->mtd.eraseregions[i].numblocks);


	/* partitions should match sector boundaries; and it may be good to
	 * use readonly partitions for writeprotected sectors (BP2..BP0).
	 */
	if (mtd_has_partitions()) {
		struct mtd_partition	*parts = NULL;
		int			nr_parts = 0;

		/* Partition sources, in priority order: kernel command
		 * line, platform data, then (optionally) device tree.
		 */
		if (mtd_has_cmdlinepart()) {
			static const char *part_probes[]
					= { "cmdlinepart", NULL, };

			nr_parts = parse_mtd_partitions(&flash->mtd,
					part_probes, &parts, 0);
		}

		if (nr_parts <= 0 && data && data->parts) {
			parts = data->parts;
			nr_parts = data->nr_parts;
		}

#ifdef CONFIG_MTD_OF_PARTS
		if (nr_parts <= 0 && spi->dev.of_node) {
			nr_parts = of_mtd_parse_partitions(&spi->dev,
					spi->dev.of_node, &parts);
		}
#endif

		if (nr_parts > 0) {
			for (i = 0; i < nr_parts; i++) {
				DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
					"{.name = %s, .offset = 0x%llx, "
						".size = 0x%llx (%lldKiB) }\n",
					i, parts[i].name,
					(long long)parts[i].offset,
					(long long)parts[i].size,
					(long long)(parts[i].size >> 10));
			}
			flash->partitioned = 1;
			return add_mtd_partitions(&flash->mtd, parts, nr_parts);
		}
	} else if (data && data->nr_parts)
		dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
				data->nr_parts, data->name);

	/* add_mtd_device() returns 1 on failure, 0 on success. */
	return add_mtd_device(&flash->mtd) == 1 ? -ENODEV : 0;
}
| 989 | |
| 990 | |
| 991 | static int __devexit m25p_remove(struct spi_device *spi) |
| 992 | { |
| 993 | struct m25p *flash = dev_get_drvdata(&spi->dev); |
| 994 | int status; |
| 995 | |
| 996 | /* Clean up MTD stuff. */ |
| 997 | if (mtd_has_partitions() && flash->partitioned) |
| 998 | status = del_mtd_partitions(&flash->mtd); |
| 999 | else |
| 1000 | status = del_mtd_device(&flash->mtd); |
Johannes Stezenbach | 61c3506 | 2009-10-28 14:21:37 +0100 | [diff] [blame] | 1001 | if (status == 0) { |
| 1002 | kfree(flash->command); |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1003 | kfree(flash); |
Johannes Stezenbach | 61c3506 | 2009-10-28 14:21:37 +0100 | [diff] [blame] | 1004 | } |
Mike Lavender | 2f9f762 | 2006-01-08 13:34:27 -0800 | [diff] [blame] | 1005 | return 0; |
| 1006 | } |
| 1007 | |
| 1008 | |
/* Glue binding this driver to the SPI core: match table plus probe/remove. */
static struct spi_driver m25p80_driver = {
	.driver = {
		.name	= "m25p80",
		.bus	= &spi_bus_type,
		.owner	= THIS_MODULE,
	},
	.id_table	= m25p_ids,
	.probe	= m25p_probe,
	.remove	= __devexit_p(m25p_remove),

	/* REVISIT: many of these chips have deep power-down modes, which
	 * should clearly be entered on suspend() to minimize power use.
	 * And also when they're otherwise idle...
	 */
};
| 1024 | |
| 1025 | |
/* Module load: register with the SPI core; probing happens per device. */
static int __init m25p80_init(void)
{
	return spi_register_driver(&m25p80_driver);
}
| 1030 | |
| 1031 | |
/* Module unload: unregister; the core calls m25p_remove() per device. */
static void __exit m25p80_exit(void)
{
	spi_unregister_driver(&m25p80_driver);
}
| 1036 | |
| 1037 | |
/* Standard module entry/exit hookup and metadata. */
module_init(m25p80_init);
module_exit(m25p80_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("MTD SPI driver for ST M25Pxx flash chips");