/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);


	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

	cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8bits to report number of sectors,
	 * which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It seems the device IDs are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay. The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive. As a result, it is recommended
 * that a patch be applied. Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur. The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue see Micron TN-13-07
	 * Worst case delay must be 500µs but 30-50µs should be ok as well
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}

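/*
 * Probe-time entry point for the AMD/Fujitsu (0x0002) command set: allocate
 * the mtd_info, read and fix up the primary extension table when present,
 * apply the fixup tables and hand over to cfi_amdstd_setup().
 */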
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_amdstd_erase_varsize;
	mtd->_write   = cfi_amdstd_write_words;
	mtd->_read    = cfi_amdstd_read;
	mtd->_sync    = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
		 mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

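/*
 * Second-stage setup: translate the CFI erase-region geometry into the
 * mtd_erase_region_info array, sanity-check that the regions add up to the
 * device size, and register the reboot notifier.
 */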
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return map_word_equal(map, oldd, curd) &&
	       map_word_equal(map, curd, expected);
}

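/*
 * Wait for the chip to become ready for a new operation in the given
 * mode.  An in-progress erase is suspended when the mode allows it;
 * otherwise the caller sleeps until the chip is free.  Called with
 * chip->mutex held; the lock may be dropped and re-taken while waiting.
 */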
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we're trying
				 * to use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}


854{
855 struct cfi_private *cfi = map->fldrv_priv;
856
857 switch(chip->oldstate) {
858 case FL_ERASING:
Gerlando Falauto42096282012-07-03 09:09:47 +0200859 cfi_fixup_m29ew_erase_suspend(map,
860 chip->in_progress_block_addr);
Guillaume LECERF08968042010-10-26 10:45:23 +0100861 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
Gerlando Falauto42096282012-07-03 09:09:47 +0200862 cfi_fixup_m29ew_delay_after_resume(cfi);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863 chip->oldstate = FL_READY;
864 chip->state = FL_ERASING;
865 break;
866
Todd Poynor02b15e32005-06-07 00:04:39 +0100867 case FL_XIP_WHILE_ERASING:
868 chip->state = chip->oldstate;
869 chip->oldstate = FL_READY;
870 break;
871
Linus Torvalds1da177e2005-04-16 15:20:36 -0700872 case FL_READY:
873 case FL_STATUS:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874 break;
875 default:
876 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
877 }
878 wake_up(&chip->wq);
879}
880
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remaining
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

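/*
 * Non-XIP build: the XIP hooks collapse to no-ops, and the delay helpers
 * below simply drop chip->mutex around a plain cfi_udelay() (and cache
 * invalidate) so other users can make progress while we wait.
 */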
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

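/*
 * Read a range that lies entirely within a single chip: wait for the chip,
 * drop it back to array (read) mode if needed, then copy the data out.
 */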
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

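/*
 * Read from the 8-byte SecSi (OTP) area of one chip: enter SecSi sector
 * mode with the 0xAA/0x55/0x88 unlock sequence, copy the data, then exit
 * with the 0x90/0x00 sequence.
 */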
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

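/*
 * Program a single bus-width word at adr: wait for the chip, skip the write
 * if the data is already present, then issue the 0xAA/0x55/0xA0 program
 * sequence and poll until the chip reports completion.
 */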
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n",
			 __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
1319 /* FIXME - should have reset delay before continuing */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001321 if (++retry_cnt <= MAX_WORD_RETRIES)
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01001322 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01001324 ret = -EIO;
1325 }
Todd Poynor02b15e32005-06-07 00:04:39 +01001326 xip_enable(map, chip, adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 op_done:
1328 chip->state = FL_READY;
Paul Parsonse7d93772012-03-07 14:11:16 +00001329 DISABLE_VPP(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330 put_chip(map, chip, adr);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001331 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332
1333 return ret;
1334}
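
/*
 * Minimal sketch (not part of the driver) of the bare word-program
 * cycle issued by do_write_oneword() above, with the locking, XIP and
 * retry handling stripped out.  'adr' is the absolute map offset
 * (chip->start already added); cfi_send_gen_cmd() hides bus width and
 * interleave in the real path.
 */
static void __maybe_unused word_program_sketch(struct map_info *map,
					       struct flchip *chip,
					       unsigned long adr,
					       map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;

	/* two unlock cycles followed by the program setup command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/* a single bus write of the datum then starts the embedded program */
	map_write(map, datum, adr);
}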
1335
1336
1337static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1338 size_t *retlen, const u_char *buf)
1339{
1340 struct map_info *map = mtd->priv;
1341 struct cfi_private *cfi = map->fldrv_priv;
1342 int ret = 0;
1343 int chipnum;
1344 unsigned long ofs, chipstart;
1345 DECLARE_WAITQUEUE(wait, current);
1346
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 chipnum = to >> cfi->chipshift;
1348 ofs = to - (chipnum << cfi->chipshift);
1349 chipstart = cfi->chips[chipnum].start;
1350
1351 /* If it's not bus-aligned, do the first byte write */
1352 if (ofs & (map_bankwidth(map)-1)) {
1353 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1354 int i = ofs - bus_ofs;
1355 int n = 0;
1356 map_word tmp_buf;
1357
1358 retry:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001359 mutex_lock(&cfi->chips[chipnum].mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360
1361 if (cfi->chips[chipnum].state != FL_READY) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 set_current_state(TASK_UNINTERRUPTIBLE);
1363 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1364
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001365 mutex_unlock(&cfi->chips[chipnum].mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366
1367 schedule();
1368 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 goto retry;
1370 }
1371
1372 /* Load 'tmp_buf' with old contents of flash */
1373 tmp_buf = map_read(map, bus_ofs+chipstart);
1374
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001375 mutex_unlock(&cfi->chips[chipnum].mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376
1377 /* Number of bytes to copy from buffer */
1378 n = min_t(int, len, map_bankwidth(map)-i);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001379
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1381
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001382 ret = do_write_oneword(map, &cfi->chips[chipnum],
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 bus_ofs, tmp_buf);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001384 if (ret)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 return ret;
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001386
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 ofs += n;
1388 buf += n;
1389 (*retlen) += n;
1390 len -= n;
1391
1392 if (ofs >> cfi->chipshift) {
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001393 chipnum ++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 ofs = 0;
1395 if (chipnum == cfi->numchips)
1396 return 0;
1397 }
1398 }
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001399
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 /* We are now aligned, write as much as possible */
1401 while(len >= map_bankwidth(map)) {
1402 map_word datum;
1403
1404 datum = map_word_load(map, buf);
1405
1406 ret = do_write_oneword(map, &cfi->chips[chipnum],
1407 ofs, datum);
1408 if (ret)
1409 return ret;
1410
1411 ofs += map_bankwidth(map);
1412 buf += map_bankwidth(map);
1413 (*retlen) += map_bankwidth(map);
1414 len -= map_bankwidth(map);
1415
1416 if (ofs >> cfi->chipshift) {
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001417 chipnum ++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 ofs = 0;
1419 if (chipnum == cfi->numchips)
1420 return 0;
1421 chipstart = cfi->chips[chipnum].start;
1422 }
1423 }
1424
1425 /* Write the trailing bytes if any */
1426 if (len & (map_bankwidth(map)-1)) {
1427 map_word tmp_buf;
1428
1429 retry1:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001430 mutex_lock(&cfi->chips[chipnum].mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431
1432 if (cfi->chips[chipnum].state != FL_READY) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 set_current_state(TASK_UNINTERRUPTIBLE);
1434 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1435
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001436 mutex_unlock(&cfi->chips[chipnum].mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437
1438 schedule();
1439 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 goto retry1;
1441 }
1442
1443 tmp_buf = map_read(map, ofs + chipstart);
1444
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001445 mutex_unlock(&cfi->chips[chipnum].mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446
1447 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001448
1449 ret = do_write_oneword(map, &cfi->chips[chipnum],
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 ofs, tmp_buf);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001451 if (ret)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 return ret;
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001453
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 (*retlen) += len;
1455 }
1456
1457 return 0;
1458}
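
/*
 * Sketch of the alignment math used for the unaligned head in
 * cfi_amdstd_write_words() above, assuming a 16-bit bus
 * (map_bankwidth() == 2).  A write starting at ofs = 5 gives
 * bus_ofs = 4, i = 1 and n = 1: the old word at offset 4 is read back,
 * byte 1 is replaced from the caller's buffer, and the merged word is
 * programmed with do_write_oneword().  Not used by the driver.
 */
static void __maybe_unused unaligned_head_example(struct map_info *map,
						  unsigned long ofs,
						  size_t len)
{
	unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
	int i = ofs - bus_ofs;				/* byte offset in the word */
	int n = min_t(int, len, map_bankwidth(map) - i);/* bytes merged in */

	pr_debug("head sketch: ofs=%lu -> bus_ofs=%lu, i=%d, n=%d\n",
		 ofs, bus_ofs, i, n);
}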
1459
1460
1461/*
1462 * FIXME: interleaved mode not tested, and probably not supported!
1463 */
Todd Poynor02b15e32005-06-07 00:04:39 +01001464static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001465 unsigned long adr, const u_char *buf,
Todd Poynor02b15e32005-06-07 00:04:39 +01001466 int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467{
1468 struct cfi_private *cfi = map->fldrv_priv;
1469 unsigned long timeo = jiffies + HZ;
1470	/* see comments in do_write_oneword() regarding uWriteTimeout. */
1471 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1472 int ret = -EIO;
1473 unsigned long cmd_adr;
1474 int z, words;
1475 map_word datum;
1476
1477 adr += chip->start;
1478 cmd_adr = adr;
1479
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001480 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 ret = get_chip(map, chip, adr, FL_WRITING);
1482 if (ret) {
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001483 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 return ret;
1485 }
1486
1487 datum = map_word_load(map, buf);
1488
Brian Norris289c0522011-07-19 10:06:09 -07001489 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 __func__, adr, datum.x[0] );
1491
Todd Poynor02b15e32005-06-07 00:04:39 +01001492 XIP_INVAL_CACHED_RANGE(map, adr, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 ENABLE_VPP(map);
Todd Poynor02b15e32005-06-07 00:04:39 +01001494 xip_disable(map, chip, cmd_adr);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001495
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1497 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498
1499 /* Write Buffer Load */
1500 map_write(map, CMD(0x25), cmd_adr);
1501
1502 chip->state = FL_WRITING_TO_BUFFER;
1503
1504 /* Write length of data to come */
1505 words = len / map_bankwidth(map);
1506 map_write(map, CMD(words - 1), cmd_adr);
1507 /* Write data */
1508 z = 0;
1509 while(z < words * map_bankwidth(map)) {
1510 datum = map_word_load(map, buf);
1511 map_write(map, datum, adr + z);
1512
1513 z += map_bankwidth(map);
1514 buf += map_bankwidth(map);
1515 }
1516 z -= map_bankwidth(map);
1517
1518 adr += z;
1519
1520 /* Write Buffer Program Confirm: GO GO GO */
1521 map_write(map, CMD(0x29), cmd_adr);
1522 chip->state = FL_WRITING;
1523
Todd Poynor02b15e32005-06-07 00:04:39 +01001524 INVALIDATE_CACHE_UDELAY(map, chip,
1525 adr, map_bankwidth(map),
1526 chip->word_write_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001528 timeo = jiffies + uWriteTimeout;
1529
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 for (;;) {
1531 if (chip->state != FL_WRITING) {
1532 /* Someone's suspended the write. Sleep */
1533 DECLARE_WAITQUEUE(wait, current);
1534
1535 set_current_state(TASK_UNINTERRUPTIBLE);
1536 add_wait_queue(&chip->wq, &wait);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001537 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 schedule();
1539 remove_wait_queue(&chip->wq, &wait);
1540 timeo = jiffies + (HZ / 2); /* FIXME */
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001541 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 continue;
1543 }
1544
Konstantin Baidarovb95f9602005-11-07 09:00:05 +00001545 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1546 break;
1547
Todd Poynor02b15e32005-06-07 00:04:39 +01001548 if (chip_ready(map, adr)) {
1549 xip_enable(map, chip, adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 goto op_done;
Todd Poynor02b15e32005-06-07 00:04:39 +01001551 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552
1553 /* Latency issues. Drop the lock, wait a while and retry */
Todd Poynor02b15e32005-06-07 00:04:39 +01001554 UDELAY(map, chip, adr, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 }
1556
Harald Nordgard-Hansen070c3222012-11-23 23:11:03 +01001557 /*
1558 * Recovery from write-buffer programming failures requires
1559 * the write-to-buffer-reset sequence. Since the last part
1560 * of the sequence also works as a normal reset, we can run
1561 * the same commands regardless of why we are here.
1562 * See e.g.
1563 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
1564 */
1565 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1566 cfi->device_type, NULL);
1567 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1568 cfi->device_type, NULL);
1569 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
1570 cfi->device_type, NULL);
Todd Poynor02b15e32005-06-07 00:04:39 +01001571 xip_enable(map, chip, adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 /* FIXME - should have reset delay before continuing */
1573
Huang Shijie25983b12013-04-01 17:40:18 +08001574 printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n",
1575 __func__, adr);
Todd Poynor02b15e32005-06-07 00:04:39 +01001576
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 ret = -EIO;
1578 op_done:
1579 chip->state = FL_READY;
Paul Parsonse7d93772012-03-07 14:11:16 +00001580 DISABLE_VPP(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 put_chip(map, chip, adr);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001582 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583
1584 return ret;
1585}
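
/*
 * Condensed sketch (not part of the driver) of the write-buffer
 * command sequence driven by do_write_buffer() above, with no locking,
 * XIP or timeout handling.  'words' is the number of bus-wide words to
 * load; the device expects that count minus one, then the data, then
 * the 0x29 confirm at the same command address.
 */
static void __maybe_unused write_buffer_sketch(struct map_info *map,
					       struct flchip *chip,
					       unsigned long cmd_adr,
					       const u_char *buf, int words)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr = cmd_adr;
	int z;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x25), cmd_adr);		/* buffer load */
	map_write(map, CMD(words - 1), cmd_adr);	/* word count - 1 */

	for (z = 0; z < words * map_bankwidth(map); z += map_bankwidth(map)) {
		map_write(map, map_word_load(map, buf), adr + z);
		buf += map_bankwidth(map);
	}

	map_write(map, CMD(0x29), cmd_adr);		/* program confirm */
}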
1586
1587
1588static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1589 size_t *retlen, const u_char *buf)
1590{
1591 struct map_info *map = mtd->priv;
1592 struct cfi_private *cfi = map->fldrv_priv;
1593 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1594 int ret = 0;
1595 int chipnum;
1596 unsigned long ofs;
1597
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 chipnum = to >> cfi->chipshift;
1599 ofs = to - (chipnum << cfi->chipshift);
1600
1601 /* If it's not bus-aligned, do the first word write */
1602 if (ofs & (map_bankwidth(map)-1)) {
1603 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1604 if (local_len > len)
1605 local_len = len;
1606 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1607 local_len, retlen, buf);
1608 if (ret)
1609 return ret;
1610 ofs += local_len;
1611 buf += local_len;
1612 len -= local_len;
1613
1614 if (ofs >> cfi->chipshift) {
1615 chipnum ++;
1616 ofs = 0;
1617 if (chipnum == cfi->numchips)
1618 return 0;
1619 }
1620 }
1621
1622 /* Write buffer is worth it only if more than one word to write... */
1623 while (len >= map_bankwidth(map) * 2) {
1624 /* We must not cross write block boundaries */
1625 int size = wbufsize - (ofs & (wbufsize-1));
1626
1627 if (size > len)
1628 size = len;
1629 if (size % map_bankwidth(map))
1630 size -= size % map_bankwidth(map);
1631
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001632 ret = do_write_buffer(map, &cfi->chips[chipnum],
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633 ofs, buf, size);
1634 if (ret)
1635 return ret;
1636
1637 ofs += size;
1638 buf += size;
1639 (*retlen) += size;
1640 len -= size;
1641
1642 if (ofs >> cfi->chipshift) {
Thomas Gleixner1f948b42005-11-07 11:15:37 +00001643 chipnum ++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 ofs = 0;
1645 if (chipnum == cfi->numchips)
1646 return 0;
1647 }
1648 }
1649
1650 if (len) {
1651 size_t retlen_dregs = 0;
1652
1653 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1654 len, &retlen_dregs, buf);
1655
1656 *retlen += retlen_dregs;
1657 return ret;
1658 }
1659
1660 return 0;
1661}
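
/*
 * Sketch of the chunking done by cfi_amdstd_write_buffers() above,
 * assuming a non-interleaved chip with MaxBufWriteSize = 5, i.e. a
 * 32-byte write buffer.  A 100-byte write at offset 0x1c is cut at the
 * buffer boundaries into chunks of 4, 32, 32 and 32 bytes.  The
 * bankwidth rounding of the real code is omitted here.
 */
static void __maybe_unused write_chunk_example(struct cfi_private *cfi,
					       unsigned long ofs, size_t len)
{
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	while (len) {
		int size = wbufsize - (ofs & (wbufsize - 1));

		if (size > len)
			size = len;
		pr_debug("chunk sketch: ofs=%lu size=%d\n", ofs, size);
		ofs += size;
		len -= size;
	}
}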
1662
Ira W. Snyder30ec5a22012-01-06 11:29:19 -08001663/*
1664 * Wait for the flash chip to become ready to write data
1665 *
1666 * This is only called during the panic_write() path. When panic_write()
1667 * is called, the kernel is in the process of a panic, and will soon be
1668 * dead. Therefore we don't take any locks, and attempt to get access
1669 * to the chip as soon as possible.
1670 */
1671static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
1672 unsigned long adr)
1673{
1674 struct cfi_private *cfi = map->fldrv_priv;
1675 int retries = 10;
1676 int i;
1677
1678 /*
1679 * If the driver thinks the chip is idle, and no toggle bits
1680 * are changing, then the chip is actually idle for sure.
1681 */
1682 if (chip->state == FL_READY && chip_ready(map, adr))
1683 return 0;
1684
1685 /*
1686 * Try several times to reset the chip and then wait for it
1687 * to become idle. The upper limit of a few milliseconds of
1688 * delay isn't a big problem: the kernel is dying anyway. It
1689 * is more important to save the messages.
1690 */
1691 while (retries > 0) {
1692 const unsigned long timeo = (HZ / 1000) + 1;
1693
1694 /* send the reset command */
1695 map_write(map, CMD(0xF0), chip->start);
1696
1697 /* wait for the chip to become ready */
1698 for (i = 0; i < jiffies_to_usecs(timeo); i++) {
1699 if (chip_ready(map, adr))
1700 return 0;
1701
1702 udelay(1);
1703 }
1704 }
1705
1706 /* the chip never became ready */
1707 return -EBUSY;
1708}
1709
1710/*
1711 * Write out one word of data to a single flash chip during a kernel panic
1712 *
1713 * This is only called during the panic_write() path. When panic_write()
1714 * is called, the kernel is in the process of a panic, and will soon be
1715 * dead. Therefore we don't take any locks, and attempt to get access
1716 * to the chip as soon as possible.
1717 *
1718 * The implementation of this routine is intentionally similar to
1719 * do_write_oneword(), in order to ease code maintenance.
1720 */
1721static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
1722 unsigned long adr, map_word datum)
1723{
1724 const unsigned long uWriteTimeout = (HZ / 1000) + 1;
1725 struct cfi_private *cfi = map->fldrv_priv;
1726 int retry_cnt = 0;
1727 map_word oldd;
1728 int ret = 0;
1729 int i;
1730
1731 adr += chip->start;
1732
1733 ret = cfi_amdstd_panic_wait(map, chip, adr);
1734 if (ret)
1735 return ret;
1736
1737 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
1738 __func__, adr, datum.x[0]);
1739
1740 /*
1741 * Check for a NOP for the case when the datum to write is already
1742 * present - it saves time and works around buggy chips that corrupt
1743 * data at other locations when 0xff is written to a location that
1744 * already contains 0xff.
1745 */
1746 oldd = map_read(map, adr);
1747 if (map_word_equal(map, oldd, datum)) {
1748 pr_debug("MTD %s(): NOP\n", __func__);
1749 goto op_done;
1750 }
1751
1752 ENABLE_VPP(map);
1753
1754retry:
1755 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1756 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1757 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1758 map_write(map, datum, adr);
1759
1760 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
1761 if (chip_ready(map, adr))
1762 break;
1763
1764 udelay(1);
1765 }
1766
1767 if (!chip_good(map, adr, datum)) {
1768 /* reset on all failures. */
1769 map_write(map, CMD(0xF0), chip->start);
1770 /* FIXME - should have reset delay before continuing */
1771
1772 if (++retry_cnt <= MAX_WORD_RETRIES)
1773 goto retry;
1774
1775 ret = -EIO;
1776 }
1777
1778op_done:
1779 DISABLE_VPP(map);
1780 return ret;
1781}
1782
1783/*
1784 * Write out some data during a kernel panic
1785 *
1786 * This is used by the mtdoops driver to save the dying messages from a
1787 * kernel which has panic'd.
1788 *
1789 * This routine ignores all of the locking used throughout the rest of the
1790 * driver, in order to ensure that the data gets written out no matter what
1791 * state this driver (and the flash chip itself) was in when the kernel crashed.
1792 *
1793 * The implementation of this routine is intentionally similar to
1794 * cfi_amdstd_write_words(), in order to ease code maintenance.
1795 */
1796static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1797 size_t *retlen, const u_char *buf)
1798{
1799 struct map_info *map = mtd->priv;
1800 struct cfi_private *cfi = map->fldrv_priv;
1801 unsigned long ofs, chipstart;
1802 int ret = 0;
1803 int chipnum;
1804
Ira W. Snyder30ec5a22012-01-06 11:29:19 -08001805 chipnum = to >> cfi->chipshift;
1806 ofs = to - (chipnum << cfi->chipshift);
1807 chipstart = cfi->chips[chipnum].start;
1808
1809 /* If it's not bus aligned, do the first byte write */
1810 if (ofs & (map_bankwidth(map) - 1)) {
1811 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
1812 int i = ofs - bus_ofs;
1813 int n = 0;
1814 map_word tmp_buf;
1815
1816 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
1817 if (ret)
1818 return ret;
1819
1820 /* Load 'tmp_buf' with old contents of flash */
1821 tmp_buf = map_read(map, bus_ofs + chipstart);
1822
1823 /* Number of bytes to copy from buffer */
1824 n = min_t(int, len, map_bankwidth(map) - i);
1825
1826 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1827
1828 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1829 bus_ofs, tmp_buf);
1830 if (ret)
1831 return ret;
1832
1833 ofs += n;
1834 buf += n;
1835 (*retlen) += n;
1836 len -= n;
1837
1838 if (ofs >> cfi->chipshift) {
1839 chipnum++;
1840 ofs = 0;
1841 if (chipnum == cfi->numchips)
1842 return 0;
1843 }
1844 }
1845
1846 /* We are now aligned, write as much as possible */
1847 while (len >= map_bankwidth(map)) {
1848 map_word datum;
1849
1850 datum = map_word_load(map, buf);
1851
1852 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1853 ofs, datum);
1854 if (ret)
1855 return ret;
1856
1857 ofs += map_bankwidth(map);
1858 buf += map_bankwidth(map);
1859 (*retlen) += map_bankwidth(map);
1860 len -= map_bankwidth(map);
1861
1862 if (ofs >> cfi->chipshift) {
1863 chipnum++;
1864 ofs = 0;
1865 if (chipnum == cfi->numchips)
1866 return 0;
1867
1868 chipstart = cfi->chips[chipnum].start;
1869 }
1870 }
1871
1872 /* Write the trailing bytes if any */
1873 if (len & (map_bankwidth(map) - 1)) {
1874 map_word tmp_buf;
1875
1876 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
1877 if (ret)
1878 return ret;
1879
1880 tmp_buf = map_read(map, ofs + chipstart);
1881
1882 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1883
1884 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1885 ofs, tmp_buf);
1886 if (ret)
1887 return ret;
1888
1889 (*retlen) += len;
1890 }
1891
1892 return 0;
1893}
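
/*
 * Hedged usage sketch: roughly how an mtdoops-style client would reach
 * cfi_amdstd_panic_write() through the generic mtd_panic_write()
 * wrapper.  The offset (start of the partition) and message buffer are
 * made up for illustration; this function is not part of the driver.
 */
static int __maybe_unused panic_write_usage_sketch(struct mtd_info *mtd,
						   const u_char *msg,
						   size_t msglen)
{
	size_t retlen = 0;

	/* no locks are taken; intended only from a dying kernel */
	return mtd_panic_write(mtd, 0, msglen, &retlen, msg);
}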
1894
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895
1896/*
1897 * Handle devices with one erase region, that only implement
1898 * the chip erase command.
1899 */
Todd Poynor02b15e32005-06-07 00:04:39 +01001900static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901{
1902 struct cfi_private *cfi = map->fldrv_priv;
1903 unsigned long timeo = jiffies + HZ;
1904 unsigned long int adr;
1905 DECLARE_WAITQUEUE(wait, current);
1906 int ret = 0;
1907
1908 adr = cfi->addr_unlock1;
1909
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001910 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 ret = get_chip(map, chip, adr, FL_WRITING);
1912 if (ret) {
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001913 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914 return ret;
1915 }
1916
Brian Norris289c0522011-07-19 10:06:09 -07001917 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 __func__, chip->start );
1919
Todd Poynor02b15e32005-06-07 00:04:39 +01001920 XIP_INVAL_CACHED_RANGE(map, adr, map->size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 ENABLE_VPP(map);
Todd Poynor02b15e32005-06-07 00:04:39 +01001922 xip_disable(map, chip, adr);
1923
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1925 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1926 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1927 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1928 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1929 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1930
1931 chip->state = FL_ERASING;
1932 chip->erase_suspended = 0;
1933 chip->in_progress_block_addr = adr;
1934
Todd Poynor02b15e32005-06-07 00:04:39 +01001935 INVALIDATE_CACHE_UDELAY(map, chip,
1936 adr, map->size,
1937 chip->erase_time*500);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938
1939 timeo = jiffies + (HZ*20);
1940
1941 for (;;) {
1942 if (chip->state != FL_ERASING) {
1943 /* Someone's suspended the erase. Sleep */
1944 set_current_state(TASK_UNINTERRUPTIBLE);
1945 add_wait_queue(&chip->wq, &wait);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001946 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 schedule();
1948 remove_wait_queue(&chip->wq, &wait);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001949 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 continue;
1951 }
1952 if (chip->erase_suspended) {
1953 /* This erase was suspended and resumed.
1954 Adjust the timeout */
1955 timeo = jiffies + (HZ*20); /* FIXME */
1956 chip->erase_suspended = 0;
1957 }
1958
1959 if (chip_ready(map, adr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 break;
1961
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01001962 if (time_after(jiffies, timeo)) {
1963 printk(KERN_WARNING "MTD %s(): software timeout\n",
1964 __func__ );
1965 break;
1966 }
1967
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 /* Latency issues. Drop the lock, wait a while and retry */
Todd Poynor02b15e32005-06-07 00:04:39 +01001969 UDELAY(map, chip, adr, 1000000/HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 }
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01001971 /* Did we succeed? */
1972 if (!chip_good(map, adr, map_word_ff(map))) {
1973 /* reset on all failures. */
1974 map_write( map, CMD(0xF0), chip->start );
1975 /* FIXME - should have reset delay before continuing */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01001977 ret = -EIO;
1978 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 chip->state = FL_READY;
Todd Poynor02b15e32005-06-07 00:04:39 +01001981 xip_enable(map, chip, adr);
Paul Parsonse7d93772012-03-07 14:11:16 +00001982 DISABLE_VPP(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 put_chip(map, chip, adr);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001984 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985
1986 return ret;
1987}
1988
1989
Todd Poynor02b15e32005-06-07 00:04:39 +01001990static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991{
1992 struct cfi_private *cfi = map->fldrv_priv;
1993 unsigned long timeo = jiffies + HZ;
1994 DECLARE_WAITQUEUE(wait, current);
1995 int ret = 0;
1996
1997 adr += chip->start;
1998
Stefani Seiboldc4e77372010-04-18 22:46:44 +02001999 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 ret = get_chip(map, chip, adr, FL_ERASING);
2001 if (ret) {
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002002 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 return ret;
2004 }
2005
Brian Norris289c0522011-07-19 10:06:09 -07002006 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 __func__, adr );
2008
Todd Poynor02b15e32005-06-07 00:04:39 +01002009 XIP_INVAL_CACHED_RANGE(map, adr, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 ENABLE_VPP(map);
Todd Poynor02b15e32005-06-07 00:04:39 +01002011 xip_disable(map, chip, adr);
2012
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2014 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2015 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2016 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2017 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
Guillaume LECERF08968042010-10-26 10:45:23 +01002018 map_write(map, cfi->sector_erase_cmd, adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019
2020 chip->state = FL_ERASING;
2021 chip->erase_suspended = 0;
2022 chip->in_progress_block_addr = adr;
Todd Poynor02b15e32005-06-07 00:04:39 +01002023
2024 INVALIDATE_CACHE_UDELAY(map, chip,
2025 adr, len,
2026 chip->erase_time*500);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
2028 timeo = jiffies + (HZ*20);
2029
2030 for (;;) {
2031 if (chip->state != FL_ERASING) {
2032 /* Someone's suspended the erase. Sleep */
2033 set_current_state(TASK_UNINTERRUPTIBLE);
2034 add_wait_queue(&chip->wq, &wait);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002035 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 schedule();
2037 remove_wait_queue(&chip->wq, &wait);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002038 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 continue;
2040 }
2041 if (chip->erase_suspended) {
2042 /* This erase was suspended and resumed.
2043 Adjust the timeout */
2044 timeo = jiffies + (HZ*20); /* FIXME */
2045 chip->erase_suspended = 0;
2046 }
2047
Todd Poynor02b15e32005-06-07 00:04:39 +01002048 if (chip_ready(map, adr)) {
2049 xip_enable(map, chip, adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 break;
Todd Poynor02b15e32005-06-07 00:04:39 +01002051 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002053 if (time_after(jiffies, timeo)) {
Todd Poynor02b15e32005-06-07 00:04:39 +01002054 xip_enable(map, chip, adr);
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002055 printk(KERN_WARNING "MTD %s(): software timeout\n",
2056 __func__ );
2057 break;
2058 }
2059
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 /* Latency issues. Drop the lock, wait a while and retry */
Todd Poynor02b15e32005-06-07 00:04:39 +01002061 UDELAY(map, chip, adr, 1000000/HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 }
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002063 /* Did we succeed? */
Thomas Gleixner22fd9a82005-05-24 15:33:49 +02002064 if (!chip_good(map, adr, map_word_ff(map))) {
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002065 /* reset on all failures. */
2066 map_write( map, CMD(0xF0), chip->start );
2067 /* FIXME - should have reset delay before continuing */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068
Eric W. Biedermannfb4a90b2005-05-20 04:28:26 +01002069 ret = -EIO;
2070 }
2071
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 chip->state = FL_READY;
Paul Parsonse7d93772012-03-07 14:11:16 +00002073 DISABLE_VPP(map);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 put_chip(map, chip, adr);
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002075 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 return ret;
2077}
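
/*
 * Condensed sketch contrasting the two erase sequences used above:
 * both start with the same five-cycle erase setup, then chip erase
 * sends 0x10 at the unlock address while sector erase writes the
 * sector-erase command (normally 0x30) at the sector address.  No
 * locking or status polling here; see do_erase_chip() and
 * do_erase_oneblock() for the real paths.  Not used by the driver.
 */
static void __maybe_unused erase_cmd_sketch(struct map_info *map,
					    struct flchip *chip,
					    unsigned long sect_adr,
					    int whole_chip)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);

	if (whole_chip)
		cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map,
				 cfi, cfi->device_type, NULL);
	else
		map_write(map, cfi->sector_erase_cmd, sect_adr);
}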
2078
2079
Ben Dooksce0f33a2007-05-28 19:59:00 +01002080static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081{
2082 unsigned long ofs, len;
2083 int ret;
2084
2085 ofs = instr->addr;
2086 len = instr->len;
2087
2088 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
2089 if (ret)
2090 return ret;
2091
2092 instr->state = MTD_ERASE_DONE;
2093 mtd_erase_callback(instr);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002094
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 return 0;
2096}
2097
2098
2099static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2100{
2101 struct map_info *map = mtd->priv;
2102 struct cfi_private *cfi = map->fldrv_priv;
2103 int ret = 0;
2104
2105 if (instr->addr != 0)
2106 return -EINVAL;
2107
2108 if (instr->len != mtd->size)
2109 return -EINVAL;
2110
2111 ret = do_erase_chip(map, &cfi->chips[0]);
2112 if (ret)
2113 return ret;
2114
2115 instr->state = MTD_ERASE_DONE;
2116 mtd_erase_callback(instr);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002117
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118 return 0;
2119}
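
/*
 * Hedged usage sketch for the erase entry points above, going through
 * the generic mtd_erase() wrapper.  The erase_info field names follow
 * the layout of this kernel generation, and the offset is illustrative;
 * this helper is not part of the driver.
 */
static int __maybe_unused erase_usage_sketch(struct mtd_info *mtd)
{
	struct erase_info ei = {
		.mtd  = mtd,
		.addr = 0,			/* first eraseblock */
		.len  = mtd->erasesize,
	};

	/* this driver completes erases synchronously */
	return mtd_erase(mtd, &ei);
}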
2120
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002121static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2122 unsigned long adr, int len, void *thunk)
2123{
2124 struct cfi_private *cfi = map->fldrv_priv;
2125 int ret;
2126
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002127 mutex_lock(&chip->mutex);
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002128 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2129 if (ret)
2130 goto out_unlock;
2131 chip->state = FL_LOCKING;
2132
Brian Norris0a32a102011-07-19 10:06:10 -07002133 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002134
2135 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2136 cfi->device_type, NULL);
2137 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2138 cfi->device_type, NULL);
2139 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2140 cfi->device_type, NULL);
2141 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2142 cfi->device_type, NULL);
2143 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2144 cfi->device_type, NULL);
2145 map_write(map, CMD(0x40), chip->start + adr);
2146
2147 chip->state = FL_READY;
2148 put_chip(map, chip, adr + chip->start);
2149 ret = 0;
2150
2151out_unlock:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002152 mutex_unlock(&chip->mutex);
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002153 return ret;
2154}
2155
2156static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2157 unsigned long adr, int len, void *thunk)
2158{
2159 struct cfi_private *cfi = map->fldrv_priv;
2160 int ret;
2161
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002162 mutex_lock(&chip->mutex);
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002163 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2164 if (ret)
2165 goto out_unlock;
2166 chip->state = FL_UNLOCKING;
2167
Brian Norris0a32a102011-07-19 10:06:10 -07002168	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002169
2170 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2171 cfi->device_type, NULL);
2172 map_write(map, CMD(0x70), adr);
2173
2174 chip->state = FL_READY;
2175 put_chip(map, chip, adr + chip->start);
2176 ret = 0;
2177
2178out_unlock:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002179 mutex_unlock(&chip->mutex);
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002180 return ret;
2181}
2182
Adrian Hunter69423d92008-12-10 13:37:21 +00002183static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002184{
2185 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2186}
2187
Adrian Hunter69423d92008-12-10 13:37:21 +00002188static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
Haavard Skinnemoen01655082006-08-09 11:06:07 +02002189{
2190 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2191}
2192
Stefan Roese1648eaa2013-01-18 13:10:05 +01002193/*
2194 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2195 */
2196
2197struct ppb_lock {
2198 struct flchip *chip;
2199 loff_t offset;
2200 int locked;
2201};
2202
2203#define MAX_SECTORS 512
2204
2205#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
2206#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
2207#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
2208
2209static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2210 struct flchip *chip,
2211 unsigned long adr, int len, void *thunk)
2212{
2213 struct cfi_private *cfi = map->fldrv_priv;
2214 unsigned long timeo;
2215 int ret;
2216
2217 mutex_lock(&chip->mutex);
2218 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2219 if (ret) {
2220 mutex_unlock(&chip->mutex);
2221 return ret;
2222 }
2223
2224 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
2225
2226 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2227 cfi->device_type, NULL);
2228 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2229 cfi->device_type, NULL);
2230 /* PPB entry command */
2231 cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
2232 cfi->device_type, NULL);
2233
2234 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2235 chip->state = FL_LOCKING;
2236 map_write(map, CMD(0xA0), chip->start + adr);
2237 map_write(map, CMD(0x00), chip->start + adr);
2238 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2239 /*
2240 * Unlocking of one specific sector is not supported, so we
2241 * have to unlock all sectors of this device instead
2242 */
2243 chip->state = FL_UNLOCKING;
2244 map_write(map, CMD(0x80), chip->start);
2245 map_write(map, CMD(0x30), chip->start);
2246 } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
2247 chip->state = FL_JEDEC_QUERY;
2248		/* The device reads back 0 for a locked sector; invert so ret is 1 when locked */
2249 ret = !cfi_read_query(map, adr);
2250 } else
2251 BUG();
2252
2253 /*
2254 * Wait for some time as unlocking of all sectors takes quite long
2255 */
2256 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
2257 for (;;) {
2258 if (chip_ready(map, adr))
2259 break;
2260
2261 if (time_after(jiffies, timeo)) {
2262 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
2263 ret = -EIO;
2264 break;
2265 }
2266
2267 UDELAY(map, chip, adr, 1);
2268 }
2269
2270 /* Exit BC commands */
2271 map_write(map, CMD(0x90), chip->start);
2272 map_write(map, CMD(0x00), chip->start);
2273
2274 chip->state = FL_READY;
2275 put_chip(map, chip, adr + chip->start);
2276 mutex_unlock(&chip->mutex);
2277
2278 return ret;
2279}
2280
2281static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2282 uint64_t len)
2283{
2284 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2285 DO_XXLOCK_ONEBLOCK_LOCK);
2286}
2287
2288static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2289 uint64_t len)
2290{
2291 struct mtd_erase_region_info *regions = mtd->eraseregions;
2292 struct map_info *map = mtd->priv;
2293 struct cfi_private *cfi = map->fldrv_priv;
2294 struct ppb_lock *sect;
2295 unsigned long adr;
2296 loff_t offset;
2297 uint64_t length;
2298 int chipnum;
2299 int i;
2300 int sectors;
2301 int ret;
2302
2303 /*
2304 * PPB unlocking always unlocks all sectors of the flash chip.
2305	 * We need to re-lock all previously locked sectors. So let's
2306 * first check the locking status of all sectors and save
2307 * it for future use.
2308 */
2309 sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
2310 if (!sect)
2311 return -ENOMEM;
2312
2313 /*
2314 * This code to walk all sectors is a slightly modified version
2315 * of the cfi_varsize_frob() code.
2316 */
2317 i = 0;
2318 chipnum = 0;
2319 adr = 0;
2320 sectors = 0;
2321 offset = 0;
2322 length = mtd->size;
2323
2324 while (length) {
2325 int size = regions[i].erasesize;
2326
2327 /*
2328 * Only test sectors that shall not be unlocked. The other
2329		 * sectors shall be unlocked, so let's keep their locking
2330 * status at "unlocked" (locked=0) for the final re-locking.
2331 */
2332 if ((adr < ofs) || (adr >= (ofs + len))) {
2333 sect[sectors].chip = &cfi->chips[chipnum];
2334 sect[sectors].offset = offset;
2335 sect[sectors].locked = do_ppb_xxlock(
2336 map, &cfi->chips[chipnum], adr, 0,
2337 DO_XXLOCK_ONEBLOCK_GETLOCK);
2338 }
2339
2340 adr += size;
2341 offset += size;
2342 length -= size;
2343
2344 if (offset == regions[i].offset + size * regions[i].numblocks)
2345 i++;
2346
2347 if (adr >> cfi->chipshift) {
2348 adr = 0;
2349 chipnum++;
2350
2351 if (chipnum >= cfi->numchips)
2352 break;
2353 }
2354
2355 sectors++;
2356 if (sectors >= MAX_SECTORS) {
2357			printk(KERN_ERR "PPB locking supports at most %d sectors!\n",
2358			       MAX_SECTORS);
2359 kfree(sect);
2360 return -EINVAL;
2361 }
2362 }
2363
2364 /* Now unlock the whole chip */
2365 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2366 DO_XXLOCK_ONEBLOCK_UNLOCK);
2367 if (ret) {
2368 kfree(sect);
2369 return ret;
2370 }
2371
2372 /*
2373 * PPB unlocking always unlocks all sectors of the flash chip.
2374 * We need to re-lock all previously locked sectors.
2375 */
2376 for (i = 0; i < sectors; i++) {
2377 if (sect[i].locked)
2378 do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
2379 DO_XXLOCK_ONEBLOCK_LOCK);
2380 }
2381
2382 kfree(sect);
2383 return ret;
2384}
2385
2386static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2387 uint64_t len)
2388{
2389 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2390 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2391}
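
/*
 * Hedged usage sketch for the sector protection hooks above, via the
 * generic mtd_lock()/mtd_unlock()/mtd_is_locked() wrappers.  Whether
 * the Atmel soft-lock or the PPB variants back these calls depends on
 * how the probe code wired up mtd->_lock and friends; this helper is
 * not part of the driver.
 */
static int __maybe_unused lock_usage_sketch(struct mtd_info *mtd, loff_t ofs)
{
	int ret, locked;

	ret = mtd_unlock(mtd, ofs, mtd->erasesize);	/* unprotect one block */
	if (ret)
		return ret;

	locked = mtd_is_locked(mtd, ofs, mtd->erasesize);
	pr_debug("lock sketch: block at %lld is %slocked\n",
		 (long long)ofs, locked > 0 ? "" : "not ");

	return mtd_lock(mtd, ofs, mtd->erasesize);	/* protect it again */
}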
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392
2393static void cfi_amdstd_sync (struct mtd_info *mtd)
2394{
2395 struct map_info *map = mtd->priv;
2396 struct cfi_private *cfi = map->fldrv_priv;
2397 int i;
2398 struct flchip *chip;
2399 int ret = 0;
2400 DECLARE_WAITQUEUE(wait, current);
2401
2402 for (i=0; !ret && i<cfi->numchips; i++) {
2403 chip = &cfi->chips[i];
2404
2405 retry:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002406 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407
2408 switch(chip->state) {
2409 case FL_READY:
2410 case FL_STATUS:
2411 case FL_CFI_QUERY:
2412 case FL_JEDEC_QUERY:
2413 chip->oldstate = chip->state;
2414 chip->state = FL_SYNCING;
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002415 /* No need to wake_up() on this state change -
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 * as the whole point is that nobody can do anything
2417 * with the chip now anyway.
2418 */
2419 case FL_SYNCING:
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002420 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421 break;
2422
2423 default:
2424 /* Not an idle state */
Dmitry Adamushkof8e30e42008-04-08 17:41:59 -07002425 set_current_state(TASK_UNINTERRUPTIBLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426 add_wait_queue(&chip->wq, &wait);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002427
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002428 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429
2430 schedule();
2431
2432 remove_wait_queue(&chip->wq, &wait);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002433
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 goto retry;
2435 }
2436 }
2437
2438 /* Unlock the chips again */
2439
2440 for (i--; i >=0; i--) {
2441 chip = &cfi->chips[i];
2442
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002443 mutex_lock(&chip->mutex);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002444
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 if (chip->state == FL_SYNCING) {
2446 chip->state = chip->oldstate;
2447 wake_up(&chip->wq);
2448 }
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002449 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 }
2451}
2452
2453
2454static int cfi_amdstd_suspend(struct mtd_info *mtd)
2455{
2456 struct map_info *map = mtd->priv;
2457 struct cfi_private *cfi = map->fldrv_priv;
2458 int i;
2459 struct flchip *chip;
2460 int ret = 0;
2461
2462 for (i=0; !ret && i<cfi->numchips; i++) {
2463 chip = &cfi->chips[i];
2464
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002465 mutex_lock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466
2467 switch(chip->state) {
2468 case FL_READY:
2469 case FL_STATUS:
2470 case FL_CFI_QUERY:
2471 case FL_JEDEC_QUERY:
2472 chip->oldstate = chip->state;
2473 chip->state = FL_PM_SUSPENDED;
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002474 /* No need to wake_up() on this state change -
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475 * as the whole point is that nobody can do anything
2476 * with the chip now anyway.
2477 */
2478 case FL_PM_SUSPENDED:
2479 break;
2480
2481 default:
2482 ret = -EAGAIN;
2483 break;
2484 }
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002485 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002486 }
2487
2488 /* Unlock the chips again */
2489
2490 if (ret) {
2491 for (i--; i >=0; i--) {
2492 chip = &cfi->chips[i];
2493
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002494 mutex_lock(&chip->mutex);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002495
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496 if (chip->state == FL_PM_SUSPENDED) {
2497 chip->state = chip->oldstate;
2498 wake_up(&chip->wq);
2499 }
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002500 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501 }
2502 }
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002503
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504 return ret;
2505}
2506
2507
2508static void cfi_amdstd_resume(struct mtd_info *mtd)
2509{
2510 struct map_info *map = mtd->priv;
2511 struct cfi_private *cfi = map->fldrv_priv;
2512 int i;
2513 struct flchip *chip;
2514
2515 for (i=0; i<cfi->numchips; i++) {
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002516
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517 chip = &cfi->chips[i];
2518
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002519 mutex_lock(&chip->mutex);
Thomas Gleixner1f948b42005-11-07 11:15:37 +00002520
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 if (chip->state == FL_PM_SUSPENDED) {
2522 chip->state = FL_READY;
2523 map_write(map, CMD(0xF0), chip->start);
2524 wake_up(&chip->wq);
2525 }
2526 else
2527 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
2528
Stefani Seiboldc4e77372010-04-18 22:46:44 +02002529 mutex_unlock(&chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 }
2531}
2532
Kevin Cernekeeeafe1312010-04-29 10:26:56 -07002533
2534/*
2535 * Ensure that the flash device is put back into read array mode before
2536 * unloading the driver or rebooting. On some systems, rebooting while
2537 * the flash is in query/program/erase mode will prevent the CPU from
2538 * fetching the bootloader code, requiring a hard reset or power cycle.
2539 */
2540static int cfi_amdstd_reset(struct mtd_info *mtd)
2541{
2542 struct map_info *map = mtd->priv;
2543 struct cfi_private *cfi = map->fldrv_priv;
2544 int i, ret;
2545 struct flchip *chip;
2546
2547 for (i = 0; i < cfi->numchips; i++) {
2548
2549 chip = &cfi->chips[i];
2550
2551 mutex_lock(&chip->mutex);
2552
2553 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2554 if (!ret) {
2555 map_write(map, CMD(0xF0), chip->start);
2556 chip->state = FL_SHUTDOWN;
2557 put_chip(map, chip, chip->start);
2558 }
2559
2560 mutex_unlock(&chip->mutex);
2561 }
2562
2563 return 0;
2564}
2565
2566
2567static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
2568 void *v)
2569{
2570 struct mtd_info *mtd;
2571
2572 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2573 cfi_amdstd_reset(mtd);
2574 return NOTIFY_DONE;
2575}
2576
2577
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578static void cfi_amdstd_destroy(struct mtd_info *mtd)
2579{
2580 struct map_info *map = mtd->priv;
2581 struct cfi_private *cfi = map->fldrv_priv;
Jesper Juhlfa671642005-11-07 01:01:27 -08002582
Kevin Cernekeeeafe1312010-04-29 10:26:56 -07002583 cfi_amdstd_reset(mtd);
2584 unregister_reboot_notifier(&mtd->reboot_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 kfree(cfi->cmdset_priv);
2586 kfree(cfi->cfiq);
2587 kfree(cfi);
2588 kfree(mtd->eraseregions);
2589}
2590
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591MODULE_LICENSE("GPL");
2592MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
2593MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
Guillaume LECERF80461122010-05-20 16:54:10 +02002594MODULE_ALIAS("cfi_cmdset_0006");
David Woodhouse1e804ce2010-05-20 16:54:05 +02002595MODULE_ALIAS("cfi_cmdset_0701");