/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.183 2005/08/06 04:46:56 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"


/*
 * *********** SETUP AND PROBE BITS ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
104
105#ifdef DEBUG_CFI_FEATURES
106static void cfi_tell_features(struct cfi_pri_intelext *extp)
107{
108 int i;
Nicolas Pitre638d9832005-08-06 05:40:46 +0100109 printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
111 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
112 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
113 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
114 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
115 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
116 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
117 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
118 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
119 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
120 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
Nicolas Pitre638d9832005-08-06 05:40:46 +0100121 printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
122 for (i=11; i<32; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123 if (extp->FeatureSupport & (1<<i))
124 printk(" - Unknown Bit %X: supported\n", i);
125 }
126
127 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
128 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
129 for (i=1; i<8; i++) {
130 if (extp->SuspendCmdSupport & (1<<i))
131 printk(" - Unknown Bit %X: supported\n", i);
132 }
133
134 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
135 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
Nicolas Pitre638d9832005-08-06 05:40:46 +0100136 printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
137 for (i=2; i<3; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138 if (extp->BlkStatusRegMask & (1<<i))
139 printk(" - Unknown Bit %X Active: yes\n",i);
140 }
Nicolas Pitre638d9832005-08-06 05:40:46 +0100141 printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
142 printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
143 for (i=6; i<16; i++) {
144 if (extp->BlkStatusRegMask & (1<<i))
145 printk(" - Unknown Bit %X Active: yes\n",i);
146 }
147
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
149 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
150 if (extp->VppOptimal)
151 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
152 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
153}
154#endif
155
156#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
158static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
159{
160 struct map_info *map = mtd->priv;
161 struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
163
164 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
165 "erase on write disabled.\n");
166 extp->SuspendCmdSupport &= ~1;
167}
168#endif
169
170#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
171static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
172{
173 struct map_info *map = mtd->priv;
174 struct cfi_private *cfi = map->fldrv_priv;
175 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
176
177 if (cfip && (cfip->FeatureSupport&4)) {
178 cfip->FeatureSupport &= ~4;
179 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
180 }
181}
182#endif
183
184static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
185{
186 struct map_info *map = mtd->priv;
187 struct cfi_private *cfi = map->fldrv_priv;
188
189 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
190 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
191}
192
193static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
194{
195 struct map_info *map = mtd->priv;
196 struct cfi_private *cfi = map->fldrv_priv;
197
198 /* Note this is done after the region info is endian swapped */
199 cfi->cfiq->EraseRegionInfo[1] =
200 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
201};
202
203static void fixup_use_point(struct mtd_info *mtd, void *param)
204{
205 struct map_info *map = mtd->priv;
206 if (!mtd->point && map_is_linear(map)) {
207 mtd->point = cfi_intelext_point;
208 mtd->unpoint = cfi_intelext_unpoint;
209 }
210}
211
212static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
213{
214 struct map_info *map = mtd->priv;
215 struct cfi_private *cfi = map->fldrv_priv;
216 if (cfi->cfiq->BufWriteTimeoutTyp) {
217 printk(KERN_INFO "Using buffer write method\n" );
218 mtd->write = cfi_intelext_write_buffers;
Nicolas Pitree102d542005-08-06 05:46:59 +0100219 mtd->writev = cfi_intelext_writev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220 }
221}
222
223static struct cfi_fixup cfi_fixup_table[] = {
224#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
225 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
226#endif
227#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
228 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
229#endif
230#if !FORCE_WORD_WRITE
231 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
232#endif
233 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
234 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
235 { 0, 0, NULL, NULL }
236};
237
238static struct cfi_fixup jedec_fixup_table[] = {
239 { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
240 { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
241 { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
242 { 0, 0, NULL, NULL }
243};
244static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table is for picking up all the cases
	 * where we know that to be so.
	 */
250 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
251 { 0, 0, NULL, NULL }
252};
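/*
 * Note on the fixup tables above (added commentary, not from the original
 * author): cfi_fixup() -- implemented in the generic CFI support code --
 * presumably walks a table like this and calls each entry's handler when
 * the probed chip matches the entry's manufacturer and device ID, with
 * CFI_MFR_ANY/CFI_ID_ANY acting as wildcards and the all-zero entry
 * terminating the walk.  A hypothetical board-specific quirk would be
 * hooked in by adding one more { mfr, id, handler, param } line before
 * the terminator.
 */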
253
254static inline struct cfi_pri_intelext *
255read_pri_intelext(struct map_info *map, __u16 adr)
256{
257 struct cfi_pri_intelext *extp;
258 unsigned int extp_size = sizeof(*extp);
259
260 again:
261 extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
262 if (!extp)
263 return NULL;
264
Todd Poynord88f9772005-07-20 22:01:17 +0100265 if (extp->MajorVersion != '1' ||
Nicolas Pitre638d9832005-08-06 05:40:46 +0100266 (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
Todd Poynord88f9772005-07-20 22:01:17 +0100267 printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
268 "version %c.%c.\n", extp->MajorVersion,
269 extp->MinorVersion);
270 kfree(extp);
271 return NULL;
272 }
273
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274 /* Do some byteswapping if necessary */
275 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
276 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
277 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
278
Nicolas Pitre638d9832005-08-06 05:40:46 +0100279 if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280 unsigned int extra_size = 0;
281 int nb_parts, i;
282
283 /* Protection Register info */
Nicolas Pitre72b56a22005-02-05 02:06:19 +0000284 extra_size += (extp->NumProtectionFields - 1) *
285 sizeof(struct cfi_intelext_otpinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286
287 /* Burst Read info */
Nicolas Pitre638d9832005-08-06 05:40:46 +0100288 extra_size += (extp->MinorVersion < '4') ? 6 : 5;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289
290 /* Number of hardware-partitions */
291 extra_size += 1;
292 if (extp_size < sizeof(*extp) + extra_size)
293 goto need_more;
294 nb_parts = extp->extra[extra_size - 1];
295
Nicolas Pitre638d9832005-08-06 05:40:46 +0100296 /* skip the sizeof(partregion) field in CFI 1.4 */
297 if (extp->MinorVersion >= '4')
298 extra_size += 2;
299
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300 for (i = 0; i < nb_parts; i++) {
301 struct cfi_intelext_regioninfo *rinfo;
302 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
303 extra_size += sizeof(*rinfo);
304 if (extp_size < sizeof(*extp) + extra_size)
305 goto need_more;
306 rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
307 extra_size += (rinfo->NumBlockTypes - 1)
308 * sizeof(struct cfi_intelext_blockinfo);
309 }
310
Nicolas Pitre638d9832005-08-06 05:40:46 +0100311 if (extp->MinorVersion >= '4')
312 extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
313
Linus Torvalds1da177e2005-04-16 15:20:36 -0700314 if (extp_size < sizeof(*extp) + extra_size) {
315 need_more:
316 extp_size = sizeof(*extp) + extra_size;
317 kfree(extp);
318 if (extp_size > 4096) {
319 printk(KERN_ERR
320 "%s: cfi_pri_intelext is too fat\n",
321 __FUNCTION__);
322 return NULL;
323 }
324 goto again;
325 }
326 }
327
328 return extp;
329}
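/*
 * Added commentary (layout inferred from the parsing above, not quoted from
 * the CFI spec): for extended query versions 1.3/1.4 the variable-length
 * 'extra' tail that read_pri_intelext() sizes up looks roughly like:
 *
 *	otp_info x (NumProtectionFields - 1)	protection register info
 *	burst read info				6 bytes (5 bytes for >= 1.4)
 *	u8 number-of-partition-regions
 *	u16 sizeof(partregion)			only for >= 1.4
 *	{ regioninfo + blockinfo[] } x regions
 *	programming regioninfo			only for >= 1.4
 *
 * The function grows extp_size and re-reads until the whole tail fits,
 * bailing out if it would exceed 4096 bytes.
 */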
330
331/* This routine is made available to other mtd code via
332 * inter_module_register. It must only be accessed through
333 * inter_module_get which will bump the use count of this module. The
334 * addresses passed back in cfi are valid as long as the use count of
335 * this module is non-zero, i.e. between inter_module_get and
336 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
337 */
338struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
339{
340 struct cfi_private *cfi = map->fldrv_priv;
341 struct mtd_info *mtd;
342 int i;
343
344 mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
345 if (!mtd) {
346 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
347 return NULL;
348 }
349 memset(mtd, 0, sizeof(*mtd));
350 mtd->priv = map;
351 mtd->type = MTD_NORFLASH;
352
353 /* Fill in the default mtd operations */
354 mtd->erase = cfi_intelext_erase_varsize;
355 mtd->read = cfi_intelext_read;
356 mtd->write = cfi_intelext_write_words;
357 mtd->sync = cfi_intelext_sync;
358 mtd->lock = cfi_intelext_lock;
359 mtd->unlock = cfi_intelext_unlock;
360 mtd->suspend = cfi_intelext_suspend;
361 mtd->resume = cfi_intelext_resume;
362 mtd->flags = MTD_CAP_NORFLASH;
363 mtd->name = map->name;
Nicolas Pitre963a6fb2005-04-01 02:59:56 +0100364
365 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
366
Linus Torvalds1da177e2005-04-16 15:20:36 -0700367 if (cfi->cfi_mode == CFI_MODE_CFI) {
368 /*
369 * It's a real CFI chip, not one for which the probe
370 * routine faked a CFI structure. So we read the feature
371 * table from it.
372 */
373 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
374 struct cfi_pri_intelext *extp;
375
376 extp = read_pri_intelext(map, adr);
377 if (!extp) {
378 kfree(mtd);
379 return NULL;
380 }
381
382 /* Install our own private info structure */
383 cfi->cmdset_priv = extp;
384
385 cfi_fixup(mtd, cfi_fixup_table);
386
387#ifdef DEBUG_CFI_FEATURES
388 /* Tell the user about it in lots of lovely detail */
389 cfi_tell_features(extp);
390#endif
391
392 if(extp->SuspendCmdSupport & 1) {
393 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
394 }
395 }
396 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
397 /* Apply jedec specific fixups */
398 cfi_fixup(mtd, jedec_fixup_table);
399 }
400 /* Apply generic fixups */
401 cfi_fixup(mtd, fixup_table);
402
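	/*
	 * Added note (per the CFI query spec as understood here): the *Typ
	 * timeout fields are log2 encoded, so 1 << value yields the typical
	 * word/buffer write time in microseconds and the block erase time in
	 * milliseconds.  The per-chip copies below are later tuned up or
	 * down by the write/erase paths depending on observed behaviour.
	 */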
403 for (i=0; i< cfi->numchips; i++) {
404 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
405 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
406 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
407 cfi->chips[i].ref_point_counter = 0;
408 }
409
410 map->fldrv = &cfi_intelext_chipdrv;
411
412 return cfi_intelext_setup(mtd);
413}
414
415static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
416{
417 struct map_info *map = mtd->priv;
418 struct cfi_private *cfi = map->fldrv_priv;
419 unsigned long offset = 0;
420 int i,j;
421 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
422
423 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
424
425 mtd->size = devsize * cfi->numchips;
426
427 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
428 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
429 * mtd->numeraseregions, GFP_KERNEL);
430 if (!mtd->eraseregions) {
431 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
432 goto setup_err;
433 }
434
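	/*
	 * Added note (summary of the CFI erase-region encoding as understood
	 * here): each EraseRegionInfo word packs (number of blocks - 1) in
	 * its low 16 bits and the block size in 256-byte units in its high
	 * 16 bits, which is what the shift-by-8 and the +1 below decode.
	 */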
435 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
436 unsigned long ernum, ersize;
437 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
438 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
439
440 if (mtd->erasesize < ersize) {
441 mtd->erasesize = ersize;
442 }
443 for (j=0; j<cfi->numchips; j++) {
444 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
445 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
446 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
447 }
448 offset += (ersize * ernum);
449 }
450
451 if (offset != devsize) {
452 /* Argh */
453 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
454 goto setup_err;
455 }
456
457 for (i=0; i<mtd->numeraseregions;i++){
Nicolas Pitre48436532005-08-06 05:16:52 +0100458 printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700459 i,mtd->eraseregions[i].offset,
460 mtd->eraseregions[i].erasesize,
461 mtd->eraseregions[i].numblocks);
462 }
463
Nicolas Pitref77814d2005-02-08 17:11:19 +0000464#ifdef CONFIG_MTD_OTP
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
Nicolas Pitref77814d2005-02-08 17:11:19 +0000466 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
467 mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
468 mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
469 mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
470 mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471#endif
472
	/* This function has the potential to distort reality
	   a bit and therefore should be called last. */
475 if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
476 goto setup_err;
477
478 __module_get(THIS_MODULE);
Nicolas Pitre963a6fb2005-04-01 02:59:56 +0100479 register_reboot_notifier(&mtd->reboot_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700480 return mtd;
481
482 setup_err:
483 if(mtd) {
484 if(mtd->eraseregions)
485 kfree(mtd->eraseregions);
486 kfree(mtd);
487 }
488 kfree(cfi->cmdset_priv);
489 return NULL;
490}
491
492static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
493 struct cfi_private **pcfi)
494{
495 struct map_info *map = mtd->priv;
496 struct cfi_private *cfi = *pcfi;
497 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
498
499 /*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point.  This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
509 */
Nicolas Pitre638d9832005-08-06 05:40:46 +0100510 if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
Linus Torvalds1da177e2005-04-16 15:20:36 -0700511 && extp->FeatureSupport & (1 << 9)) {
512 struct cfi_private *newcfi;
513 struct flchip *chip;
514 struct flchip_shared *shared;
515 int offs, numregions, numparts, partshift, numvirtchips, i, j;
516
517 /* Protection Register info */
Nicolas Pitre72b56a22005-02-05 02:06:19 +0000518 offs = (extp->NumProtectionFields - 1) *
519 sizeof(struct cfi_intelext_otpinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700520
521 /* Burst Read info */
Nicolas Pitre638d9832005-08-06 05:40:46 +0100522 offs += (extp->MinorVersion < '4') ? 6 : 5;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523
524 /* Number of partition regions */
525 numregions = extp->extra[offs];
526 offs += 1;
527
Nicolas Pitre638d9832005-08-06 05:40:46 +0100528 /* skip the sizeof(partregion) field in CFI 1.4 */
529 if (extp->MinorVersion >= '4')
530 offs += 2;
531
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 /* Number of hardware partitions */
533 numparts = 0;
534 for (i = 0; i < numregions; i++) {
535 struct cfi_intelext_regioninfo *rinfo;
536 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
537 numparts += rinfo->NumIdentPartitions;
538 offs += sizeof(*rinfo)
539 + (rinfo->NumBlockTypes - 1) *
540 sizeof(struct cfi_intelext_blockinfo);
541 }
542
Nicolas Pitre638d9832005-08-06 05:40:46 +0100543 /* Programming Region info */
544 if (extp->MinorVersion >= '4') {
545 struct cfi_intelext_programming_regioninfo *prinfo;
546 prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
547 MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
548 MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
549 MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
550 mtd->flags |= MTD_PROGRAM_REGIONS;
551 printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
552 map->name, MTD_PROGREGION_SIZE(mtd),
553 MTD_PROGREGION_CTRLMODE_VALID(mtd),
554 MTD_PROGREGION_CTRLMODE_INVALID(mtd));
555 }
556
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557 /*
558 * All functions below currently rely on all chips having
559 * the same geometry so we'll just assume that all hardware
560 * partitions are of the same size too.
561 */
562 partshift = cfi->chipshift - __ffs(numparts);
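		/*
		 * Added example (illustrative only): a 16MiB chip (chipshift
		 * 24) split into 8 hardware partitions gives partshift 21,
		 * i.e. 2MiB virtual chips.  The __ffs() trick assumes
		 * numparts is a power of two, in line with the same-geometry
		 * assumption stated above.
		 */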
563
564 if ((1 << partshift) < mtd->erasesize) {
565 printk( KERN_ERR
566 "%s: bad number of hw partitions (%d)\n",
567 __FUNCTION__, numparts);
568 return -EINVAL;
569 }
570
571 numvirtchips = cfi->numchips * numparts;
572 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
573 if (!newcfi)
574 return -ENOMEM;
575 shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
576 if (!shared) {
577 kfree(newcfi);
578 return -ENOMEM;
579 }
580 memcpy(newcfi, cfi, sizeof(struct cfi_private));
581 newcfi->numchips = numvirtchips;
582 newcfi->chipshift = partshift;
583
584 chip = &newcfi->chips[0];
585 for (i = 0; i < cfi->numchips; i++) {
586 shared[i].writing = shared[i].erasing = NULL;
587 spin_lock_init(&shared[i].lock);
588 for (j = 0; j < numparts; j++) {
589 *chip = cfi->chips[i];
590 chip->start += j << partshift;
591 chip->priv = &shared[i];
592 /* those should be reset too since
593 they create memory references. */
594 init_waitqueue_head(&chip->wq);
595 spin_lock_init(&chip->_spinlock);
596 chip->mutex = &chip->_spinlock;
597 chip++;
598 }
599 }
600
601 printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
602 "--> %d partitions of %d KiB\n",
603 map->name, cfi->numchips, cfi->interleave,
604 newcfi->numchips, 1<<(newcfi->chipshift-10));
605
606 map->fldrv_priv = newcfi;
607 *pcfi = newcfi;
608 kfree(cfi);
609 }
610
611 return 0;
612}
613
614/*
615 * *********** CHIP ACCESS FUNCTIONS ***********
616 */
617
618static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
619{
620 DECLARE_WAITQUEUE(wait, current);
621 struct cfi_private *cfi = map->fldrv_priv;
622 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
623 unsigned long timeo;
624 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
625
626 resettime:
627 timeo = jiffies + HZ;
628 retry:
Nicolas Pitref77814d2005-02-08 17:11:19 +0000629 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700630 /*
		 * OK. We have a possibility of contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
648 */
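		/*
		 * Added illustration: if partition A currently owns
		 * shared->writing for an erase and we want to write in
		 * partition B (same physical chip), the code below grabs A's
		 * mutex via the trylock/retry dance and calls get_chip() on A
		 * so that A's erase gets suspended (or we sleep) before B
		 * takes over shared->writing.
		 */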
649 struct flchip_shared *shared = chip->priv;
650 struct flchip *contender;
651 spin_lock(&shared->lock);
652 contender = shared->writing;
653 if (contender && contender != chip) {
654 /*
655 * The engine to perform desired operation on this
656 * partition is already in use by someone else.
657 * Let's fight over it in the context of the chip
658 * currently using it. If it is possible to suspend,
659 * that other partition will do just that, otherwise
660 * it'll happily send us to sleep. In any case, when
661 * get_chip returns success we're clear to go ahead.
662 */
663 int ret = spin_trylock(contender->mutex);
664 spin_unlock(&shared->lock);
665 if (!ret)
666 goto retry;
667 spin_unlock(chip->mutex);
668 ret = get_chip(map, contender, contender->start, mode);
669 spin_lock(chip->mutex);
670 if (ret) {
671 spin_unlock(contender->mutex);
672 return ret;
673 }
674 timeo = jiffies + HZ;
675 spin_lock(&shared->lock);
676 }
677
678 /* We now own it */
679 shared->writing = chip;
680 if (mode == FL_ERASING)
681 shared->erasing = chip;
682 if (contender && contender != chip)
683 spin_unlock(contender->mutex);
684 spin_unlock(&shared->lock);
685 }
686
687 switch (chip->state) {
688
689 case FL_STATUS:
690 for (;;) {
691 status = map_read(map, adr);
692 if (map_word_andequal(map, status, status_OK, status_OK))
693 break;
694
695 /* At this point we're fine with write operations
696 in other partitions as they don't conflict. */
697 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
698 break;
699
700 if (time_after(jiffies, timeo)) {
Nicolas Pitre48436532005-08-06 05:16:52 +0100701 printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
702 map->name, status.x[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 return -EIO;
704 }
705 spin_unlock(chip->mutex);
706 cfi_udelay(1);
707 spin_lock(chip->mutex);
708 /* Someone else might have been playing with it. */
709 goto retry;
710 }
711
712 case FL_READY:
713 case FL_CFI_QUERY:
714 case FL_JEDEC_QUERY:
715 return 0;
716
717 case FL_ERASING:
718 if (!cfip ||
719 !(cfip->FeatureSupport & 2) ||
720 !(mode == FL_READY || mode == FL_POINT ||
721 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
722 goto sleep;
723
724
725 /* Erase suspend */
726 map_write(map, CMD(0xB0), adr);
727
728 /* If the flash has finished erasing, then 'erase suspend'
729 * appears to make some (28F320) flash devices switch to
730 * 'read' mode. Make sure that we switch to 'read status'
731 * mode so we get the right data. --rmk
732 */
733 map_write(map, CMD(0x70), adr);
734 chip->oldstate = FL_ERASING;
735 chip->state = FL_ERASE_SUSPENDING;
736 chip->erase_suspended = 1;
737 for (;;) {
738 status = map_read(map, adr);
739 if (map_word_andequal(map, status, status_OK, status_OK))
740 break;
741
742 if (time_after(jiffies, timeo)) {
743 /* Urgh. Resume and pretend we weren't here. */
744 map_write(map, CMD(0xd0), adr);
745 /* Make sure we're in 'read status' mode if it had finished */
746 map_write(map, CMD(0x70), adr);
747 chip->state = FL_ERASING;
748 chip->oldstate = FL_READY;
Nicolas Pitre48436532005-08-06 05:16:52 +0100749 printk(KERN_ERR "%s: Chip not ready after erase "
750 "suspended: status = 0x%lx\n", map->name, status.x[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700751 return -EIO;
752 }
753
754 spin_unlock(chip->mutex);
755 cfi_udelay(1);
756 spin_lock(chip->mutex);
757 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
758 So we can just loop here. */
759 }
760 chip->state = FL_STATUS;
761 return 0;
762
763 case FL_XIP_WHILE_ERASING:
764 if (mode != FL_READY && mode != FL_POINT &&
765 (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
766 goto sleep;
767 chip->oldstate = chip->state;
768 chip->state = FL_READY;
769 return 0;
770
771 case FL_POINT:
772 /* Only if there's no operation suspended... */
773 if (mode == FL_READY && chip->oldstate == FL_READY)
774 return 0;
775
776 default:
777 sleep:
778 set_current_state(TASK_UNINTERRUPTIBLE);
779 add_wait_queue(&chip->wq, &wait);
780 spin_unlock(chip->mutex);
781 schedule();
782 remove_wait_queue(&chip->wq, &wait);
783 spin_lock(chip->mutex);
784 goto resettime;
785 }
786}
787
788static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
789{
790 struct cfi_private *cfi = map->fldrv_priv;
791
792 if (chip->priv) {
793 struct flchip_shared *shared = chip->priv;
794 spin_lock(&shared->lock);
795 if (shared->writing == chip && chip->oldstate == FL_READY) {
796 /* We own the ability to write, but we're done */
797 shared->writing = shared->erasing;
798 if (shared->writing && shared->writing != chip) {
799 /* give back ownership to who we loaned it from */
800 struct flchip *loaner = shared->writing;
801 spin_lock(loaner->mutex);
802 spin_unlock(&shared->lock);
803 spin_unlock(chip->mutex);
804 put_chip(map, loaner, loaner->start);
805 spin_lock(chip->mutex);
806 spin_unlock(loaner->mutex);
807 wake_up(&chip->wq);
808 return;
809 }
810 shared->erasing = NULL;
811 shared->writing = NULL;
812 } else if (shared->erasing == chip && shared->writing != chip) {
813 /*
814 * We own the ability to erase without the ability
815 * to write, which means the erase was suspended
816 * and some other partition is currently writing.
817 * Don't let the switch below mess things up since
818 * we don't have ownership to resume anything.
819 */
820 spin_unlock(&shared->lock);
821 wake_up(&chip->wq);
822 return;
823 }
824 spin_unlock(&shared->lock);
825 }
826
827 switch(chip->oldstate) {
828 case FL_ERASING:
829 chip->state = chip->oldstate;
830 /* What if one interleaved chip has finished and the
831 other hasn't? The old code would leave the finished
832 one in READY mode. That's bad, and caused -EROFS
833 errors to be returned from do_erase_oneblock because
834 that's the only bit it checked for at the time.
835 As the state machine appears to explicitly allow
836 sending the 0x70 (Read Status) command to an erasing
837 chip and expecting it to be ignored, that's what we
838 do. */
839 map_write(map, CMD(0xd0), adr);
840 map_write(map, CMD(0x70), adr);
841 chip->oldstate = FL_READY;
842 chip->state = FL_ERASING;
843 break;
844
845 case FL_XIP_WHILE_ERASING:
846 chip->state = chip->oldstate;
847 chip->oldstate = FL_READY;
848 break;
849
850 case FL_READY:
851 case FL_STATUS:
852 case FL_JEDEC_QUERY:
853 /* We should really make set_vpp() count, rather than doing this */
854 DISABLE_VPP(map);
855 break;
856 default:
Nicolas Pitre48436532005-08-06 05:16:52 +0100857 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858 }
859 wake_up(&chip->wq);
860}
861
862#ifdef CONFIG_MTD_XIP
863
864/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */
874
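/*
 * Added background note (not from the original author): __xipram is
 * expected to force such functions into RAM -- see its definition in
 * include/linux/mtd/xip.h -- so they remain executable while the flash
 * that normally backs the kernel text is out of array mode.
 */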
875static void xip_disable(struct map_info *map, struct flchip *chip,
876 unsigned long adr)
877{
878 /* TODO: chips with no XIP use should ignore and return */
879 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880 local_irq_disable();
881}
882
883static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
884 unsigned long adr)
885{
886 struct cfi_private *cfi = map->fldrv_priv;
887 if (chip->state != FL_POINT && chip->state != FL_READY) {
888 map_write(map, CMD(0xff), adr);
889 chip->state = FL_READY;
890 }
891 (void) map_read(map, adr);
Thomas Gleixner97f927a2005-07-07 16:50:16 +0200892 xip_iprefetch();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893 local_irq_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700894}
895
896/*
897 * When a delay is required for the flash operation to complete, the
898 * xip_udelay() function is polling for both the given timeout and pending
899 * (but still masked) hardware interrupts. Whenever there is an interrupt
900 * pending then the flash erase or write operation is suspended, array mode
901 * restored and interrupts unmasked. Task scheduling might also happen at that
902 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
905 *
906 * Warning: this function _will_ fool interrupt latency tracing tools.
907 */
908
909static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
910 unsigned long adr, int usec)
911{
912 struct cfi_private *cfi = map->fldrv_priv;
913 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
914 map_word status, OK = CMD(0x80);
915 unsigned long suspended, start = xip_currtime();
916 flstate_t oldstate, newstate;
917
918 do {
919 cpu_relax();
920 if (xip_irqpending() && cfip &&
921 ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
922 (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
923 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
924 /*
925 * Let's suspend the erase or write operation when
926 * supported. Note that we currently don't try to
927 * suspend interleaved chips if there is already
928 * another operation suspended (imagine what happens
929 * when one chip was already done with the current
930 * operation while another chip suspended it, then
931 * we resume the whole thing at once). Yes, it
932 * can happen!
933 */
934 map_write(map, CMD(0xb0), adr);
935 map_write(map, CMD(0x70), adr);
936 usec -= xip_elapsed_since(start);
937 suspended = xip_currtime();
938 do {
939 if (xip_elapsed_since(suspended) > 100000) {
940 /*
941 * The chip doesn't want to suspend
942 * after waiting for 100 msecs.
943 * This is a critical error but there
944 * is not much we can do here.
945 */
946 return;
947 }
948 status = map_read(map, adr);
949 } while (!map_word_andequal(map, status, OK, OK));
950
951 /* Suspend succeeded */
952 oldstate = chip->state;
953 if (oldstate == FL_ERASING) {
954 if (!map_word_bitsset(map, status, CMD(0x40)))
955 break;
956 newstate = FL_XIP_WHILE_ERASING;
957 chip->erase_suspended = 1;
958 } else {
959 if (!map_word_bitsset(map, status, CMD(0x04)))
960 break;
961 newstate = FL_XIP_WHILE_WRITING;
962 chip->write_suspended = 1;
963 }
964 chip->state = newstate;
965 map_write(map, CMD(0xff), adr);
966 (void) map_read(map, adr);
967 asm volatile (".rep 8; nop; .endr");
968 local_irq_enable();
Nicolas Pitre6da70122005-05-19 18:05:47 +0100969 spin_unlock(chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700970 asm volatile (".rep 8; nop; .endr");
971 cond_resched();
972
973 /*
974 * We're back. However someone else might have
975 * decided to go write to the chip if we are in
976 * a suspended erase state. If so let's wait
977 * until it's done.
978 */
Nicolas Pitre6da70122005-05-19 18:05:47 +0100979 spin_lock(chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980 while (chip->state != newstate) {
981 DECLARE_WAITQUEUE(wait, current);
982 set_current_state(TASK_UNINTERRUPTIBLE);
983 add_wait_queue(&chip->wq, &wait);
Nicolas Pitre6da70122005-05-19 18:05:47 +0100984 spin_unlock(chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985 schedule();
986 remove_wait_queue(&chip->wq, &wait);
Nicolas Pitre6da70122005-05-19 18:05:47 +0100987 spin_lock(chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988 }
989 /* Disallow XIP again */
990 local_irq_disable();
991
992 /* Resume the write or erase operation */
993 map_write(map, CMD(0xd0), adr);
994 map_write(map, CMD(0x70), adr);
995 chip->state = oldstate;
996 start = xip_currtime();
997 } else if (usec >= 1000000/HZ) {
998 /*
999 * Try to save on CPU power when waiting delay
1000 * is at least a system timer tick period.
1001 * No need to be extremely accurate here.
1002 */
1003 xip_cpu_idle();
1004 }
1005 status = map_read(map, adr);
1006 } while (!map_word_andequal(map, status, OK, OK)
1007 && xip_elapsed_since(start) < usec);
1008}
1009
1010#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
1011
1012/*
1013 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1014 * the flash is actively programming or erasing since we have to poll for
1015 * the operation to complete anyway. We can't do that in a generic way with
 * an XIP setup so do it before the actual flash operation in this case
1017 * and stub it out from INVALIDATE_CACHE_UDELAY.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018 */
Nicolas Pitre6da70122005-05-19 18:05:47 +01001019#define XIP_INVAL_CACHED_RANGE(map, from, size) \
1020 INVALIDATE_CACHED_RANGE(map, from, size)
1021
1022#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1023 UDELAY(map, chip, adr, usec)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001024
1025/*
1026 * Extra notes:
1027 *
1028 * Activating this XIP support changes the way the code works a bit. For
1029 * example the code to suspend the current process when concurrent access
1030 * happens is never executed because xip_udelay() will always return with the
1031 * same chip state as it was entered with. This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple of
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
1034 * The queueing and scheduling are always happening within xip_udelay().
1035 *
1036 * Similarly, get_chip() and put_chip() just happen to always be executed
1037 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
1038 * is in array mode, therefore never executing many cases therein and not
1039 * causing any problem with XIP.
1040 */
1041
1042#else
1043
1044#define xip_disable(map, chip, adr)
1045#define xip_enable(map, chip, adr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046#define XIP_INVAL_CACHED_RANGE(x...)
1047
Nicolas Pitre6da70122005-05-19 18:05:47 +01001048#define UDELAY(map, chip, adr, usec) \
1049do { \
1050 spin_unlock(chip->mutex); \
1051 cfi_udelay(usec); \
1052 spin_lock(chip->mutex); \
1053} while (0)
1054
1055#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1056do { \
1057 spin_unlock(chip->mutex); \
1058 INVALIDATE_CACHED_RANGE(map, adr, len); \
1059 cfi_udelay(usec); \
1060 spin_lock(chip->mutex); \
1061} while (0)
1062
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063#endif
1064
1065static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1066{
1067 unsigned long cmd_addr;
1068 struct cfi_private *cfi = map->fldrv_priv;
1069 int ret = 0;
1070
1071 adr += chip->start;
1072
1073 /* Ensure cmd read/writes are aligned. */
1074 cmd_addr = adr & ~(map_bankwidth(map)-1);
1075
1076 spin_lock(chip->mutex);
1077
1078 ret = get_chip(map, chip, cmd_addr, FL_POINT);
1079
1080 if (!ret) {
1081 if (chip->state != FL_POINT && chip->state != FL_READY)
1082 map_write(map, CMD(0xff), cmd_addr);
1083
1084 chip->state = FL_POINT;
1085 chip->ref_point_counter++;
1086 }
1087 spin_unlock(chip->mutex);
1088
1089 return ret;
1090}
1091
1092static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
1093{
1094 struct map_info *map = mtd->priv;
1095 struct cfi_private *cfi = map->fldrv_priv;
1096 unsigned long ofs;
1097 int chipnum;
1098 int ret = 0;
1099
1100 if (!map->virt || (from + len > mtd->size))
1101 return -EINVAL;
1102
1103 *mtdbuf = (void *)map->virt + from;
1104 *retlen = 0;
1105
1106 /* Now lock the chip(s) to POINT state */
1107
1108 /* ofs: offset within the first chip that the first read should start */
1109 chipnum = (from >> cfi->chipshift);
1110 ofs = from - (chipnum << cfi->chipshift);
1111
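	/*
	 * Added example (illustrative only): with two chips of 16MiB each
	 * (chipshift == 24), from = 0x1000100 selects chipnum 1 and
	 * ofs 0x100; if len spills past that chip, the loop below clamps
	 * thislen and continues on the next chip with ofs = 0.
	 */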
1112 while (len) {
1113 unsigned long thislen;
1114
1115 if (chipnum >= cfi->numchips)
1116 break;
1117
1118 if ((len + ofs -1) >> cfi->chipshift)
1119 thislen = (1<<cfi->chipshift) - ofs;
1120 else
1121 thislen = len;
1122
1123 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1124 if (ret)
1125 break;
1126
1127 *retlen += thislen;
1128 len -= thislen;
1129
1130 ofs = 0;
1131 chipnum++;
1132 }
1133 return 0;
1134}
1135
1136static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1137{
1138 struct map_info *map = mtd->priv;
1139 struct cfi_private *cfi = map->fldrv_priv;
1140 unsigned long ofs;
1141 int chipnum;
1142
1143 /* Now unlock the chip(s) POINT state */
1144
1145 /* ofs: offset within the first chip that the first read should start */
1146 chipnum = (from >> cfi->chipshift);
1147 ofs = from - (chipnum << cfi->chipshift);
1148
1149 while (len) {
1150 unsigned long thislen;
1151 struct flchip *chip;
1152
1153 chip = &cfi->chips[chipnum];
1154 if (chipnum >= cfi->numchips)
1155 break;
1156
1157 if ((len + ofs -1) >> cfi->chipshift)
1158 thislen = (1<<cfi->chipshift) - ofs;
1159 else
1160 thislen = len;
1161
1162 spin_lock(chip->mutex);
1163 if (chip->state == FL_POINT) {
1164 chip->ref_point_counter--;
1165 if(chip->ref_point_counter == 0)
1166 chip->state = FL_READY;
1167 } else
Nicolas Pitre48436532005-08-06 05:16:52 +01001168 printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169
1170 put_chip(map, chip, chip->start);
1171 spin_unlock(chip->mutex);
1172
1173 len -= thislen;
1174 ofs = 0;
1175 chipnum++;
1176 }
1177}
1178
1179static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1180{
1181 unsigned long cmd_addr;
1182 struct cfi_private *cfi = map->fldrv_priv;
1183 int ret;
1184
1185 adr += chip->start;
1186
1187 /* Ensure cmd read/writes are aligned. */
1188 cmd_addr = adr & ~(map_bankwidth(map)-1);
1189
1190 spin_lock(chip->mutex);
1191 ret = get_chip(map, chip, cmd_addr, FL_READY);
1192 if (ret) {
1193 spin_unlock(chip->mutex);
1194 return ret;
1195 }
1196
1197 if (chip->state != FL_POINT && chip->state != FL_READY) {
1198 map_write(map, CMD(0xff), cmd_addr);
1199
1200 chip->state = FL_READY;
1201 }
1202
1203 map_copy_from(map, buf, adr, len);
1204
1205 put_chip(map, chip, cmd_addr);
1206
1207 spin_unlock(chip->mutex);
1208 return 0;
1209}
1210
1211static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1212{
1213 struct map_info *map = mtd->priv;
1214 struct cfi_private *cfi = map->fldrv_priv;
1215 unsigned long ofs;
1216 int chipnum;
1217 int ret = 0;
1218
1219 /* ofs: offset within the first chip that the first read should start */
1220 chipnum = (from >> cfi->chipshift);
1221 ofs = from - (chipnum << cfi->chipshift);
1222
1223 *retlen = 0;
1224
1225 while (len) {
1226 unsigned long thislen;
1227
1228 if (chipnum >= cfi->numchips)
1229 break;
1230
1231 if ((len + ofs -1) >> cfi->chipshift)
1232 thislen = (1<<cfi->chipshift) - ofs;
1233 else
1234 thislen = len;
1235
1236 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1237 if (ret)
1238 break;
1239
1240 *retlen += thislen;
1241 len -= thislen;
1242 buf += thislen;
1243
1244 ofs = 0;
1245 chipnum++;
1246 }
1247 return ret;
1248}
1249
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
Nicolas Pitref77814d2005-02-08 17:11:19 +00001251 unsigned long adr, map_word datum, int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252{
1253 struct cfi_private *cfi = map->fldrv_priv;
Nicolas Pitref77814d2005-02-08 17:11:19 +00001254 map_word status, status_OK, write_cmd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 unsigned long timeo;
1256 int z, ret=0;
1257
1258 adr += chip->start;
1259
Nicolas Pitre638d9832005-08-06 05:40:46 +01001260 /* Let's determine those according to the interleave only once */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 status_OK = CMD(0x80);
Nicolas Pitref77814d2005-02-08 17:11:19 +00001262 switch (mode) {
Nicolas Pitre638d9832005-08-06 05:40:46 +01001263 case FL_WRITING:
1264 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1265 break;
1266 case FL_OTP_WRITE:
1267 write_cmd = CMD(0xc0);
1268 break;
1269 default:
1270 return -EINVAL;
Nicolas Pitref77814d2005-02-08 17:11:19 +00001271 }
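	/* Added note, based on the P_ID list in include/linux/mtd/cfi.h
	   rather than on this file: 0x0200 is the Intel Performance Code
	   command set, whose chips take the 0x41/0xE9 program opcodes in
	   place of the usual 0x40/0xE8. */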
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272
1273 spin_lock(chip->mutex);
Nicolas Pitref77814d2005-02-08 17:11:19 +00001274 ret = get_chip(map, chip, adr, mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 if (ret) {
1276 spin_unlock(chip->mutex);
1277 return ret;
1278 }
1279
1280 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1281 ENABLE_VPP(map);
1282 xip_disable(map, chip, adr);
Nicolas Pitref77814d2005-02-08 17:11:19 +00001283 map_write(map, write_cmd, adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 map_write(map, datum, adr);
Nicolas Pitref77814d2005-02-08 17:11:19 +00001285 chip->state = mode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286
Nicolas Pitre6da70122005-05-19 18:05:47 +01001287 INVALIDATE_CACHE_UDELAY(map, chip,
1288 adr, map_bankwidth(map),
1289 chip->word_write_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290
1291 timeo = jiffies + (HZ/2);
1292 z = 0;
1293 for (;;) {
Nicolas Pitref77814d2005-02-08 17:11:19 +00001294 if (chip->state != mode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 /* Someone's suspended the write. Sleep */
1296 DECLARE_WAITQUEUE(wait, current);
1297
1298 set_current_state(TASK_UNINTERRUPTIBLE);
1299 add_wait_queue(&chip->wq, &wait);
1300 spin_unlock(chip->mutex);
1301 schedule();
1302 remove_wait_queue(&chip->wq, &wait);
1303 timeo = jiffies + (HZ / 2); /* FIXME */
1304 spin_lock(chip->mutex);
1305 continue;
1306 }
1307
1308 status = map_read(map, adr);
1309 if (map_word_andequal(map, status, status_OK, status_OK))
1310 break;
1311
1312 /* OK Still waiting */
1313 if (time_after(jiffies, timeo)) {
Nicolas Pitre48436532005-08-06 05:16:52 +01001314 map_write(map, CMD(0x70), adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 chip->state = FL_STATUS;
1316 xip_enable(map, chip, adr);
Nicolas Pitre48436532005-08-06 05:16:52 +01001317 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 ret = -EIO;
1319 goto out;
1320 }
1321
1322 /* Latency issues. Drop the lock, wait a while and retry */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 z++;
1324 UDELAY(map, chip, adr, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 }
1326 if (!z) {
1327 chip->word_write_time--;
1328 if (!chip->word_write_time)
Nicolas Pitre48436532005-08-06 05:16:52 +01001329 chip->word_write_time = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330 }
1331 if (z > 1)
1332 chip->word_write_time++;
1333
1334 /* Done and happy. */
1335 chip->state = FL_STATUS;
1336
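	/*
	 * Added note on the mask below, per the Intel status register layout
	 * as understood here: 0x1a covers SR.4 (program error), SR.3 (VPP
	 * low) and SR.1 (block locked).  MERGESTATUS() reduces the map_word
	 * to a single status value (folding interleaved chips together, as
	 * its name suggests) before the individual bits are examined.
	 */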
	/* check for errors */
1338 if (map_word_bitsset(map, status, CMD(0x1a))) {
1339 unsigned long chipstatus = MERGESTATUS(status);
1340
1341 /* reset status */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 map_write(map, CMD(0x50), adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 map_write(map, CMD(0x70), adr);
Nicolas Pitre48436532005-08-06 05:16:52 +01001344 xip_enable(map, chip, adr);
1345
1346 if (chipstatus & 0x02) {
1347 ret = -EROFS;
1348 } else if (chipstatus & 0x08) {
1349 printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1350 ret = -EIO;
1351 } else {
1352 printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1353 ret = -EINVAL;
1354 }
1355
1356 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 }
1358
1359 xip_enable(map, chip, adr);
1360 out: put_chip(map, chip, adr);
1361 spin_unlock(chip->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 return ret;
1363}
1364
1365
1366static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1367{
1368 struct map_info *map = mtd->priv;
1369 struct cfi_private *cfi = map->fldrv_priv;
1370 int ret = 0;
1371 int chipnum;
1372 unsigned long ofs;
1373
1374 *retlen = 0;
1375 if (!len)
1376 return 0;
1377
1378 chipnum = to >> cfi->chipshift;
1379 ofs = to - (chipnum << cfi->chipshift);
1380
1381 /* If it's not bus-aligned, do the first byte write */
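	/* Added example (illustrative, assuming a 4-byte bankwidth): for
	   to = ...0x0d we get bus_ofs = ...0x0c and gap = 1, so up to three
	   bytes are merged into a 0xFF-padded word and programmed with one
	   word write before the aligned loop below takes over. */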
1382 if (ofs & (map_bankwidth(map)-1)) {
1383 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1384 int gap = ofs - bus_ofs;
1385 int n;
1386 map_word datum;
1387
1388 n = min_t(int, len, map_bankwidth(map)-gap);
1389 datum = map_word_ff(map);
1390 datum = map_word_load_partial(map, datum, buf, gap, n);
1391
1392 ret = do_write_oneword(map, &cfi->chips[chipnum],
Nicolas Pitref77814d2005-02-08 17:11:19 +00001393 bus_ofs, datum, FL_WRITING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 if (ret)
1395 return ret;
1396
1397 len -= n;
1398 ofs += n;
1399 buf += n;
1400 (*retlen) += n;
1401
1402 if (ofs >> cfi->chipshift) {
1403 chipnum ++;
1404 ofs = 0;
1405 if (chipnum == cfi->numchips)
1406 return 0;
1407 }
1408 }
1409
1410 while(len >= map_bankwidth(map)) {
1411 map_word datum = map_word_load(map, buf);
1412
1413 ret = do_write_oneword(map, &cfi->chips[chipnum],
Nicolas Pitref77814d2005-02-08 17:11:19 +00001414 ofs, datum, FL_WRITING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 if (ret)
1416 return ret;
1417
1418 ofs += map_bankwidth(map);
1419 buf += map_bankwidth(map);
1420 (*retlen) += map_bankwidth(map);
1421 len -= map_bankwidth(map);
1422
1423 if (ofs >> cfi->chipshift) {
1424 chipnum ++;
1425 ofs = 0;
1426 if (chipnum == cfi->numchips)
1427 return 0;
1428 }
1429 }
1430
1431 if (len & (map_bankwidth(map)-1)) {
1432 map_word datum;
1433
1434 datum = map_word_ff(map);
1435 datum = map_word_load_partial(map, datum, buf, 0, len);
1436
1437 ret = do_write_oneword(map, &cfi->chips[chipnum],
Nicolas Pitref77814d2005-02-08 17:11:19 +00001438 ofs, datum, FL_WRITING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 if (ret)
1440 return ret;
1441
1442 (*retlen) += len;
1443 }
1444
1445 return 0;
1446}
1447
1448
1449static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
Nicolas Pitree102d542005-08-06 05:46:59 +01001450 unsigned long adr, const struct kvec **pvec,
1451 unsigned long *pvec_seek, int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452{
1453 struct cfi_private *cfi = map->fldrv_priv;
Nicolas Pitree102d542005-08-06 05:46:59 +01001454 map_word status, status_OK, write_cmd, datum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 unsigned long cmd_adr, timeo;
Nicolas Pitree102d542005-08-06 05:46:59 +01001456 int wbufsize, z, ret=0, word_gap, words;
1457 const struct kvec *vec;
1458 unsigned long vec_seek;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459
1460 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1461 adr += chip->start;
1462 cmd_adr = adr & ~(wbufsize-1);
Nicolas Pitre638d9832005-08-06 05:40:46 +01001463
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 /* Let's determine this according to the interleave only once */
1465 status_OK = CMD(0x80);
Nicolas Pitre638d9832005-08-06 05:40:46 +01001466 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467
1468 spin_lock(chip->mutex);
1469 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1470 if (ret) {
1471 spin_unlock(chip->mutex);
1472 return ret;
1473 }
1474
1475 XIP_INVAL_CACHED_RANGE(map, adr, len);
1476 ENABLE_VPP(map);
1477 xip_disable(map, chip, cmd_adr);
1478
1479 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1480 [...], the device will not accept any more Write to Buffer commands".
1481 So we must check here and reset those bits if they're set. Otherwise
1482 we're just pissing in the wind */
1483 if (chip->state != FL_STATUS)
1484 map_write(map, CMD(0x70), cmd_adr);
1485 status = map_read(map, cmd_adr);
1486 if (map_word_bitsset(map, status, CMD(0x30))) {
1487 xip_enable(map, chip, cmd_adr);
1488 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1489 xip_disable(map, chip, cmd_adr);
1490 map_write(map, CMD(0x50), cmd_adr);
1491 map_write(map, CMD(0x70), cmd_adr);
1492 }
1493
1494 chip->state = FL_WRITING_TO_BUFFER;
1495
1496 z = 0;
1497 for (;;) {
Nicolas Pitre638d9832005-08-06 05:40:46 +01001498 map_write(map, write_cmd, cmd_adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499
1500 status = map_read(map, cmd_adr);
1501 if (map_word_andequal(map, status, status_OK, status_OK))
1502 break;
1503
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 UDELAY(map, chip, cmd_adr, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505
1506 if (++z > 20) {
1507 /* Argh. Not ready for write to buffer */
1508 map_word Xstatus;
1509 map_write(map, CMD(0x70), cmd_adr);
1510 chip->state = FL_STATUS;
1511 Xstatus = map_read(map, cmd_adr);
1512 /* Odd. Clear status bits */
1513 map_write(map, CMD(0x50), cmd_adr);
1514 map_write(map, CMD(0x70), cmd_adr);
1515 xip_enable(map, chip, cmd_adr);
Nicolas Pitre48436532005-08-06 05:16:52 +01001516 printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1517 map->name, status.x[0], Xstatus.x[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 ret = -EIO;
1519 goto out;
1520 }
1521 }
1522
	/* Figure out the number of words to write */
1524 word_gap = (-adr & (map_bankwidth(map)-1));
1525 words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1526 if (!word_gap) {
1527 words--;
1528 } else {
1529 word_gap = map_bankwidth(map) - word_gap;
1530 adr -= word_gap;
1531 datum = map_word_ff(map);
1532 }
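	/*
	 * Added worked example (illustrative, 2-byte bankwidth): writing 5
	 * bytes starting at an address ending in ...3 pulls adr back one
	 * byte to ...2, pads that byte from map_word_ff(), and programs
	 * three bus words in the loop below; 'words' ends up as 2 because
	 * the word count handed to the chip is N-1 (hence the words-- in
	 * the already-aligned case).
	 */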
1533
	/* Write length of data to come */
Nicolas Pitree102d542005-08-06 05:46:59 +01001535 map_write(map, CMD(words), cmd_adr );
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536
1537 /* Write data */
Nicolas Pitree102d542005-08-06 05:46:59 +01001538 vec = *pvec;
1539 vec_seek = *pvec_seek;
1540 do {
1541 int n = map_bankwidth(map) - word_gap;
1542 if (n > vec->iov_len - vec_seek)
1543 n = vec->iov_len - vec_seek;
1544 if (n > len)
1545 n = len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546
Nicolas Pitree102d542005-08-06 05:46:59 +01001547 if (!word_gap && len < map_bankwidth(map))
1548 datum = map_word_ff(map);
1549
1550 datum = map_word_load_partial(map, datum,
1551 vec->iov_base + vec_seek,
1552 word_gap, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
Nicolas Pitree102d542005-08-06 05:46:59 +01001554 len -= n;
1555 word_gap += n;
1556 if (!len || word_gap == map_bankwidth(map)) {
1557 map_write(map, datum, adr);
1558 adr += map_bankwidth(map);
1559 word_gap = 0;
1560 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561
Nicolas Pitree102d542005-08-06 05:46:59 +01001562 vec_seek += n;
1563 if (vec_seek == vec->iov_len) {
1564 vec++;
1565 vec_seek = 0;
1566 }
1567 } while (len);
1568 *pvec = vec;
1569 *pvec_seek = vec_seek;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570
1571 /* GO GO GO */
1572 map_write(map, CMD(0xd0), cmd_adr);
1573 chip->state = FL_WRITING;
1574
Nicolas Pitre6da70122005-05-19 18:05:47 +01001575 INVALIDATE_CACHE_UDELAY(map, chip,
1576 cmd_adr, len,
1577 chip->buffer_write_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578
1579 timeo = jiffies + (HZ/2);
1580 z = 0;
1581 for (;;) {
1582 if (chip->state != FL_WRITING) {
1583 /* Someone's suspended the write. Sleep */
1584 DECLARE_WAITQUEUE(wait, current);
1585 set_current_state(TASK_UNINTERRUPTIBLE);
1586 add_wait_queue(&chip->wq, &wait);
1587 spin_unlock(chip->mutex);
1588 schedule();
1589 remove_wait_queue(&chip->wq, &wait);
1590 timeo = jiffies + (HZ / 2); /* FIXME */
1591 spin_lock(chip->mutex);
1592 continue;
1593 }
1594
1595 status = map_read(map, cmd_adr);
1596 if (map_word_andequal(map, status, status_OK, status_OK))
1597 break;
1598
1599 /* OK, still waiting */
1600 if (time_after(jiffies, timeo)) {
Nicolas Pitre48436532005-08-06 05:16:52 +01001601 map_write(map, CMD(0x70), cmd_adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 chip->state = FL_STATUS;
1603 xip_enable(map, chip, cmd_adr);
Nicolas Pitre48436532005-08-06 05:16:52 +01001604 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 ret = -EIO;
1606 goto out;
1607 }
1608
1609 /* Latency issues. Drop the lock, wait a while and retry */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 z++;
Nicolas Pitre6da70122005-05-19 18:05:47 +01001611 UDELAY(map, chip, cmd_adr, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 }
1613 if (!z) {
1614 chip->buffer_write_time--;
1615 if (!chip->buffer_write_time)
Nicolas Pitre48436532005-08-06 05:16:52 +01001616 chip->buffer_write_time = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 }
1618 if (z > 1)
1619 chip->buffer_write_time++;
1620
1621 /* Done and happy. */
1622 chip->state = FL_STATUS;
1623
Nicolas Pitre48436532005-08-06 05:16:52 +01001624 /* check for errors */
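	/* 0x1a covers SR.4 (program error), SR.3 (VPP low) and SR.1
	   (block locked); the individual bits are decoded below. */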
1625 if (map_word_bitsset(map, status, CMD(0x1a))) {
1626 unsigned long chipstatus = MERGESTATUS(status);
1627
1628 /* reset status */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 map_write(map, CMD(0x50), cmd_adr);
Nicolas Pitre48436532005-08-06 05:16:52 +01001630 map_write(map, CMD(0x70), cmd_adr);
1631 xip_enable(map, chip, cmd_adr);
1632
1633 if (chipstatus & 0x02) {
1634 ret = -EROFS;
1635 } else if (chipstatus & 0x08) {
1636 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1637 ret = -EIO;
1638 } else {
1639 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1640 ret = -EINVAL;
1641 }
1642
1643 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 }
1645
1646 xip_enable(map, chip, cmd_adr);
1647 out: put_chip(map, chip, cmd_adr);
1648 spin_unlock(chip->mutex);
1649 return ret;
1650}
1651
Nicolas Pitree102d542005-08-06 05:46:59 +01001652static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1653 unsigned long count, loff_t to, size_t *retlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654{
1655 struct map_info *map = mtd->priv;
1656 struct cfi_private *cfi = map->fldrv_priv;
1657 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
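	/* MaxBufWriteSize from the CFI query is the log2 of each chip's
	   write buffer, so the usable buffer covers
	   interleave << MaxBufWriteSize bytes of the map. */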
1658 int ret = 0;
1659 int chipnum;
Nicolas Pitree102d542005-08-06 05:46:59 +01001660 unsigned long ofs, vec_seek, i;
1661 size_t len = 0;
1662
1663 for (i = 0; i < count; i++)
1664 len += vecs[i].iov_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665
1666 *retlen = 0;
1667 if (!len)
1668 return 0;
1669
1670 chipnum = to >> cfi->chipshift;
Nicolas Pitree102d542005-08-06 05:46:59 +01001671 ofs = to - (chipnum << cfi->chipshift);
1672 vec_seek = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673
Nicolas Pitree102d542005-08-06 05:46:59 +01001674 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 /* We must not cross write block boundaries */
1676 int size = wbufsize - (ofs & (wbufsize-1));
1677
1678 if (size > len)
1679 size = len;
1680 ret = do_write_buffer(map, &cfi->chips[chipnum],
Nicolas Pitree102d542005-08-06 05:46:59 +01001681 ofs, &vecs, &vec_seek, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 if (ret)
1683 return ret;
1684
1685 ofs += size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 (*retlen) += size;
1687 len -= size;
1688
1689 if (ofs >> cfi->chipshift) {
1690 chipnum ++;
1691 ofs = 0;
1692 if (chipnum == cfi->numchips)
1693 return 0;
1694 }
Nicolas Pitree102d542005-08-06 05:46:59 +01001695 } while (len);
1696
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 return 0;
1698}
1699
Nicolas Pitree102d542005-08-06 05:46:59 +01001700static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1701 size_t len, size_t *retlen, const u_char *buf)
1702{
1703 struct kvec vec;
1704
1705 vec.iov_base = (void *) buf;
1706 vec.iov_len = len;
1707
1708 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1709}
1710
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1712 unsigned long adr, int len, void *thunk)
1713{
1714 struct cfi_private *cfi = map->fldrv_priv;
1715 map_word status, status_OK;
1716 unsigned long timeo;
1717 int retries = 3;
1718 DECLARE_WAITQUEUE(wait, current);
1719 int ret = 0;
1720
1721 adr += chip->start;
1722
1723 /* Let's determine this according to the interleave only once */
1724 status_OK = CMD(0x80);
1725
1726 retry:
1727 spin_lock(chip->mutex);
1728 ret = get_chip(map, chip, adr, FL_ERASING);
1729 if (ret) {
1730 spin_unlock(chip->mutex);
1731 return ret;
1732 }
1733
1734 XIP_INVAL_CACHED_RANGE(map, adr, len);
1735 ENABLE_VPP(map);
1736 xip_disable(map, chip, adr);
1737
1738 /* Clear the status register first */
1739 map_write(map, CMD(0x50), adr);
1740
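	/* Block erase is a two-cycle command: 0x20 (erase setup) followed
	   by 0xD0 (erase confirm); completion is then polled via SR.7. */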
1741 /* Now erase */
1742 map_write(map, CMD(0x20), adr);
1743 map_write(map, CMD(0xD0), adr);
1744 chip->state = FL_ERASING;
1745 chip->erase_suspended = 0;
1746
Nicolas Pitre6da70122005-05-19 18:05:47 +01001747 INVALIDATE_CACHE_UDELAY(map, chip,
1748 adr, len,
1749 chip->erase_time*1000/2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750
1751 /* FIXME. Use a timer to check this, and return immediately. */
1752 /* Once the state machine's known to be working I'll do that */
1753
1754 timeo = jiffies + (HZ*20);
1755 for (;;) {
1756 if (chip->state != FL_ERASING) {
1757 /* Someone's suspended the erase. Sleep */
1758 set_current_state(TASK_UNINTERRUPTIBLE);
1759 add_wait_queue(&chip->wq, &wait);
1760 spin_unlock(chip->mutex);
1761 schedule();
1762 remove_wait_queue(&chip->wq, &wait);
1763 spin_lock(chip->mutex);
1764 continue;
1765 }
1766 if (chip->erase_suspended) {
1767 /* This erase was suspended and resumed.
1768 Adjust the timeout */
1769 timeo = jiffies + (HZ*20); /* FIXME */
1770 chip->erase_suspended = 0;
1771 }
1772
1773 status = map_read(map, adr);
1774 if (map_word_andequal(map, status, status_OK, status_OK))
1775 break;
1776
1777 /* OK, still waiting */
1778 if (time_after(jiffies, timeo)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 map_write(map, CMD(0x70), adr);
1780 chip->state = FL_STATUS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 xip_enable(map, chip, adr);
Nicolas Pitre48436532005-08-06 05:16:52 +01001782 printk(KERN_ERR "%s: block erase error (status timeout)\n", map->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 ret = -EIO;
1784 goto out;
1785 }
1786
1787 /* Latency issues. Drop the lock, wait a while and retry */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 UDELAY(map, chip, adr, 1000000/HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 }
1790
1791 /* We've broken this before. It doesn't hurt to be safe */
1792 map_write(map, CMD(0x70), adr);
1793 chip->state = FL_STATUS;
1794 status = map_read(map, adr);
1795
Nicolas Pitre48436532005-08-06 05:16:52 +01001796 /* check for errors */
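	/* 0x3a covers SR.5 (erase error), SR.4 (program error), SR.3
	   (VPP low) and SR.1 (block locked); SR.5 and SR.4 set together
	   indicate a botched command sequence. */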
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 if (map_word_bitsset(map, status, CMD(0x3a))) {
Nicolas Pitre48436532005-08-06 05:16:52 +01001798 unsigned long chipstatus = MERGESTATUS(status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799
1800 /* Reset the error bits */
1801 map_write(map, CMD(0x50), adr);
1802 map_write(map, CMD(0x70), adr);
1803 xip_enable(map, chip, adr);
1804
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 if ((chipstatus & 0x30) == 0x30) {
Nicolas Pitre48436532005-08-06 05:16:52 +01001806 printk(KERN_ERR "%s: block erase error (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1807 ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 } else if (chipstatus & 0x02) {
1809 /* Protection bit set */
1810 ret = -EROFS;
1811 } else if (chipstatus & 0x8) {
1812 /* Voltage */
Nicolas Pitre48436532005-08-06 05:16:52 +01001813 printk(KERN_ERR "%s: block erase error (bad VPP)\n", map->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 ret = -EIO;
Nicolas Pitre48436532005-08-06 05:16:52 +01001815 } else if (chipstatus & 0x20 && retries--) {
1816 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1817 timeo = jiffies + HZ;
1818 put_chip(map, chip, adr);
1819 spin_unlock(chip->mutex);
1820 goto retry;
1821 } else {
1822 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 ret = -EIO;
1824 }
Nicolas Pitre48436532005-08-06 05:16:52 +01001825
1826 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 }
1828
Nicolas Pitre48436532005-08-06 05:16:52 +01001829 xip_enable(map, chip, adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 out: put_chip(map, chip, adr);
1831 spin_unlock(chip->mutex);
1832 return ret;
1833}
1834
1835int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1836{
1837 unsigned long ofs, len;
1838 int ret;
1839
1840 ofs = instr->addr;
1841 len = instr->len;
1842
1843 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1844 if (ret)
1845 return ret;
1846
1847 instr->state = MTD_ERASE_DONE;
1848 mtd_erase_callback(instr);
1849
1850 return 0;
1851}
1852
1853static void cfi_intelext_sync (struct mtd_info *mtd)
1854{
1855 struct map_info *map = mtd->priv;
1856 struct cfi_private *cfi = map->fldrv_priv;
1857 int i;
1858 struct flchip *chip;
1859 int ret = 0;
1860
1861 for (i=0; !ret && i<cfi->numchips; i++) {
1862 chip = &cfi->chips[i];
1863
1864 spin_lock(chip->mutex);
1865 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1866
1867 if (!ret) {
1868 chip->oldstate = chip->state;
1869 chip->state = FL_SYNCING;
1870 /* No need to wake_up() on this state change -
1871 * as the whole point is that nobody can do anything
1872 * with the chip now anyway.
1873 */
1874 }
1875 spin_unlock(chip->mutex);
1876 }
1877
1878 /* Unlock the chips again */
1879
1880 for (i--; i >=0; i--) {
1881 chip = &cfi->chips[i];
1882
1883 spin_lock(chip->mutex);
1884
1885 if (chip->state == FL_SYNCING) {
1886 chip->state = chip->oldstate;
Nicolas Pitre09c79332005-03-16 22:41:09 +00001887 chip->oldstate = FL_READY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 wake_up(&chip->wq);
1889 }
1890 spin_unlock(chip->mutex);
1891 }
1892}
1893
1894#ifdef DEBUG_LOCK_BITS
1895static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1896 struct flchip *chip,
1897 unsigned long adr,
1898 int len, void *thunk)
1899{
1900 struct cfi_private *cfi = map->fldrv_priv;
1901 int status, ofs_factor = cfi->interleave * cfi->device_type;
1902
Todd Poynorc25bb1f2005-04-27 21:01:52 +01001903 adr += chip->start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 xip_disable(map, chip, adr+(2*ofs_factor));
Todd Poynorc25bb1f2005-04-27 21:01:52 +01001905 map_write(map, CMD(0x90), adr+(2*ofs_factor));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 chip->state = FL_JEDEC_QUERY;
1907 status = cfi_read_query(map, adr+(2*ofs_factor));
1908 xip_enable(map, chip, 0);
1909 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1910 adr, status);
1911 return 0;
1912}
1913#endif
1914
1915#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1916#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
1917
1918static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1919 unsigned long adr, int len, void *thunk)
1920{
1921 struct cfi_private *cfi = map->fldrv_priv;
Todd Poynor9a6e73e2005-03-29 23:06:40 +01001922 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 map_word status, status_OK;
1924 unsigned long timeo = jiffies + HZ;
1925 int ret;
1926
1927 adr += chip->start;
1928
1929 /* Let's determine this according to the interleave only once */
1930 status_OK = CMD(0x80);
1931
1932 spin_lock(chip->mutex);
1933 ret = get_chip(map, chip, adr, FL_LOCKING);
1934 if (ret) {
1935 spin_unlock(chip->mutex);
1936 return ret;
1937 }
1938
1939 ENABLE_VPP(map);
1940 xip_disable(map, chip, adr);
1941
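	/* Block locking is a two-cycle command: 0x60 (lock setup)
	   followed by 0x01 to set the lock bit or 0xD0 to clear it. */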
1942 map_write(map, CMD(0x60), adr);
1943 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1944 map_write(map, CMD(0x01), adr);
1945 chip->state = FL_LOCKING;
1946 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1947 map_write(map, CMD(0xD0), adr);
1948 chip->state = FL_UNLOCKING;
1949 } else
1950 BUG();
1951
Todd Poynor9a6e73e2005-03-29 23:06:40 +01001952 /*
1953 * If Instant Individual Block Locking is supported there is no
1954 * need to delay.
1955 */
1956
Nicolas Pitre6da70122005-05-19 18:05:47 +01001957 if (!extp || !(extp->FeatureSupport & (1 << 5)))
Todd Poynor9a6e73e2005-03-29 23:06:40 +01001958 UDELAY(map, chip, adr, 1000000/HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
1960 /* FIXME. Use a timer to check this, and return immediately. */
1961 /* Once the state machine's known to be working I'll do that */
1962
1963 timeo = jiffies + (HZ*20);
1964 for (;;) {
1965
1966 status = map_read(map, adr);
1967 if (map_word_andequal(map, status, status_OK, status_OK))
1968 break;
1969
1970 /* OK, still waiting */
1971 if (time_after(jiffies, timeo)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 map_write(map, CMD(0x70), adr);
1973 chip->state = FL_STATUS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 xip_enable(map, chip, adr);
Nicolas Pitre48436532005-08-06 05:16:52 +01001975 printk(KERN_ERR "%s: block lock/unlock error (status timeout)\n", map->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 put_chip(map, chip, adr);
1977 spin_unlock(chip->mutex);
1978 return -EIO;
1979 }
1980
1981 /* Latency issues. Drop the lock, wait a while and retry */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 UDELAY(map, chip, adr, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 }
1984
1985 /* Done and happy. */
1986 chip->state = FL_STATUS;
1987 xip_enable(map, chip, adr);
1988 put_chip(map, chip, adr);
1989 spin_unlock(chip->mutex);
1990 return 0;
1991}
1992
1993static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1994{
1995 int ret;
1996
1997#ifdef DEBUG_LOCK_BITS
1998 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1999 __FUNCTION__, ofs, len);
2000 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2001 ofs, len, 0);
2002#endif
2003
2004 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2005 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2006
2007#ifdef DEBUG_LOCK_BITS
2008 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2009 __FUNCTION__, ret);
2010 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2011 ofs, len, 0);
2012#endif
2013
2014 return ret;
2015}
2016
2017static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2018{
2019 int ret;
2020
2021#ifdef DEBUG_LOCK_BITS
2022 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2023 __FUNCTION__, ofs, len);
2024 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2025 ofs, len, 0);
2026#endif
2027
2028 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2029 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2030
2031#ifdef DEBUG_LOCK_BITS
2032 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2033 __FUNCTION__, ret);
2034 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2035 ofs, len, 0);
2036#endif
2037
2038 return ret;
2039}
2040
Nicolas Pitref77814d2005-02-08 17:11:19 +00002041#ifdef CONFIG_MTD_OTP
2042
2043typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2044 u_long data_offset, u_char *buf, u_int size,
2045 u_long prot_offset, u_int groupno, u_int groupsize);
2046
2047static int __xipram
2048do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2049 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2050{
2051 struct cfi_private *cfi = map->fldrv_priv;
2052 int ret;
2053
2054 spin_lock(chip->mutex);
2055 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2056 if (ret) {
2057 spin_unlock(chip->mutex);
2058 return ret;
2059 }
2060
2061 /* let's ensure we're not reading back cached data from array mode */
Nicolas Pitre6da70122005-05-19 18:05:47 +01002062 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
Nicolas Pitref77814d2005-02-08 17:11:19 +00002063
2064 xip_disable(map, chip, chip->start);
2065 if (chip->state != FL_JEDEC_QUERY) {
2066 map_write(map, CMD(0x90), chip->start);
2067 chip->state = FL_JEDEC_QUERY;
2068 }
2069 map_copy_from(map, buf, chip->start + offset, size);
2070 xip_enable(map, chip, chip->start);
2071
2072 /* then ensure we don't keep OTP data in the cache */
Nicolas Pitre6da70122005-05-19 18:05:47 +01002073 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
Nicolas Pitref77814d2005-02-08 17:11:19 +00002074
2075 put_chip(map, chip, chip->start);
2076 spin_unlock(chip->mutex);
2077 return 0;
2078}
2079
2080static int
2081do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2082 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2083{
2084 int ret;
2085
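	/* Program one aligned bus word per iteration; bytes outside the
	   requested range are padded with 0xff so they stay in the
	   erased state and are effectively left untouched. */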
2086 while (size) {
2087 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2088 int gap = offset - bus_ofs;
2089 int n = min_t(int, size, map_bankwidth(map)-gap);
2090 map_word datum = map_word_ff(map);
2091
2092 datum = map_word_load_partial(map, datum, buf, gap, n);
2093 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2094 if (ret)
2095 return ret;
2096
2097 offset += n;
2098 buf += n;
2099 size -= n;
2100 }
2101
2102 return 0;
2103}
2104
2105static int
2106do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2107 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2108{
2109 struct cfi_private *cfi = map->fldrv_priv;
2110 map_word datum;
2111
2112 /* make sure area matches group boundaries */
Nicolas Pitre332d71f2005-02-17 20:35:04 +00002113 if (size != grpsz)
Nicolas Pitref77814d2005-02-08 17:11:19 +00002114 return -EXDEV;
2115
2116 datum = map_word_ff(map);
2117 datum = map_word_clr(map, datum, CMD(1 << grpno));
2118 return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2119}
2120
2121static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2122 size_t *retlen, u_char *buf,
2123 otp_op_t action, int user_regs)
2124{
2125 struct map_info *map = mtd->priv;
2126 struct cfi_private *cfi = map->fldrv_priv;
2127 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2128 struct flchip *chip;
2129 struct cfi_intelext_otpinfo *otp;
2130 u_long devsize, reg_prot_offset, data_offset;
2131 u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2132 u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2133 int ret;
2134
2135 *retlen = 0;
2136
2137 /* Check that we actually have some OTP registers */
2138 if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
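	/* FeatureSupport bit 6 (0x40) advertises protection (OTP)
	   registers in the Intel extended query, and NumProtectionFields
	   gives the number of OTP regions described there. */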
2139 return -ENODATA;
2140
2141 /* we need real chips here not virtual ones */
2142 devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2143 chip_step = devsize >> cfi->chipshift;
Nicolas Pitredce2b4d2005-04-01 17:36:29 +01002144 chip_num = 0;
Nicolas Pitref77814d2005-02-08 17:11:19 +00002145
Nicolas Pitredce2b4d2005-04-01 17:36:29 +01002146 /* Some chips have OTP located in the _top_ partition only.
2147 For example: Intel 28F256L18T (T means top-parameter device) */
2148 if (cfi->mfr == MANUFACTURER_INTEL) {
2149 switch (cfi->id) {
2150 case 0x880b:
2151 case 0x880c:
2152 case 0x880d:
2153 chip_num = chip_step - 1;
2154 }
2155 }
2156
2157 for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
Nicolas Pitref77814d2005-02-08 17:11:19 +00002158 chip = &cfi->chips[chip_num];
2159 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2160
2161 /* first OTP region */
2162 field = 0;
2163 reg_prot_offset = extp->ProtRegAddr;
2164 reg_fact_groups = 1;
2165 reg_fact_size = 1 << extp->FactProtRegSize;
2166 reg_user_groups = 1;
2167 reg_user_size = 1 << extp->UserProtRegSize;
2168
2169 while (len > 0) {
2170 /* flash geometry fixup */
2171 data_offset = reg_prot_offset + 1;
2172 data_offset *= cfi->interleave * cfi->device_type;
2173 reg_prot_offset *= cfi->interleave * cfi->device_type;
2174 reg_fact_size *= cfi->interleave;
2175 reg_user_size *= cfi->interleave;
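			/* The extended query gives these addresses in
			   per-device words; scaling by interleave and
			   device_type turns them into map byte offsets,
			   while the group sizes scale with the interleave
			   only. */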
2176
2177 if (user_regs) {
2178 groups = reg_user_groups;
2179 groupsize = reg_user_size;
2180 /* skip over factory reg area */
2181 groupno = reg_fact_groups;
2182 data_offset += reg_fact_groups * reg_fact_size;
2183 } else {
2184 groups = reg_fact_groups;
2185 groupsize = reg_fact_size;
2186 groupno = 0;
2187 }
2188
Nicolas Pitre332d71f2005-02-17 20:35:04 +00002189 while (len > 0 && groups > 0) {
Nicolas Pitref77814d2005-02-08 17:11:19 +00002190 if (!action) {
2191 /*
2192 * Special case: if action is NULL
2193 * we fill buf with otp_info records.
2194 */
2195 struct otp_info *otpinfo;
2196 map_word lockword;
2197 len -= sizeof(struct otp_info);
2198 if (len <= 0)
2199 return -ENOSPC;
2200 ret = do_otp_read(map, chip,
2201 reg_prot_offset,
2202 (u_char *)&lockword,
2203 map_bankwidth(map),
2204 0, 0, 0);
2205 if (ret)
2206 return ret;
2207 otpinfo = (struct otp_info *)buf;
2208 otpinfo->start = from;
2209 otpinfo->length = groupsize;
2210 otpinfo->locked =
2211 !map_word_bitsset(map, lockword,
2212 CMD(1 << groupno));
2213 from += groupsize;
2214 buf += sizeof(*otpinfo);
2215 *retlen += sizeof(*otpinfo);
2216 } else if (from >= groupsize) {
2217 from -= groupsize;
Nicolas Pitre332d71f2005-02-17 20:35:04 +00002218 data_offset += groupsize;
Nicolas Pitref77814d2005-02-08 17:11:19 +00002219 } else {
2220 int size = groupsize;
2221 data_offset += from;
2222 size -= from;
2223 from = 0;
2224 if (size > len)
2225 size = len;
2226 ret = action(map, chip, data_offset,
2227 buf, size, reg_prot_offset,
2228 groupno, groupsize);
2229 if (ret < 0)
2230 return ret;
2231 buf += size;
2232 len -= size;
2233 *retlen += size;
Nicolas Pitre332d71f2005-02-17 20:35:04 +00002234 data_offset += size;
Nicolas Pitref77814d2005-02-08 17:11:19 +00002235 }
2236 groupno++;
2237 groups--;
2238 }
2239
2240 /* next OTP region */
2241 if (++field == extp->NumProtectionFields)
2242 break;
2243 reg_prot_offset = otp->ProtRegAddr;
2244 reg_fact_groups = otp->FactGroups;
2245 reg_fact_size = 1 << otp->FactProtRegSize;
2246 reg_user_groups = otp->UserGroups;
2247 reg_user_size = 1 << otp->UserProtRegSize;
2248 otp++;
2249 }
2250 }
2251
2252 return 0;
2253}
2254
2255static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2256 size_t len, size_t *retlen,
2257 u_char *buf)
2258{
2259 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2260 buf, do_otp_read, 0);
2261}
2262
2263static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2264 size_t len, size_t *retlen,
2265 u_char *buf)
2266{
2267 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2268 buf, do_otp_read, 1);
2269}
2270
2271static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2272 size_t len, size_t *retlen,
2273 u_char *buf)
2274{
2275 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2276 buf, do_otp_write, 1);
2277}
2278
2279static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2280 loff_t from, size_t len)
2281{
2282 size_t retlen;
2283 return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2284 NULL, do_otp_lock, 1);
2285}
2286
2287static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2288 struct otp_info *buf, size_t len)
2289{
2290 size_t retlen;
2291 int ret;
2292
2293 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2294 return ret ? : retlen;
2295}
2296
2297static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2298 struct otp_info *buf, size_t len)
2299{
2300 size_t retlen;
2301 int ret;
2302
2303 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2304 return ret ? : retlen;
2305}
2306
2307#endif
2308
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309static int cfi_intelext_suspend(struct mtd_info *mtd)
2310{
2311 struct map_info *map = mtd->priv;
2312 struct cfi_private *cfi = map->fldrv_priv;
2313 int i;
2314 struct flchip *chip;
2315 int ret = 0;
2316
2317 for (i=0; !ret && i<cfi->numchips; i++) {
2318 chip = &cfi->chips[i];
2319
2320 spin_lock(chip->mutex);
2321
2322 switch (chip->state) {
2323 case FL_READY:
2324 case FL_STATUS:
2325 case FL_CFI_QUERY:
2326 case FL_JEDEC_QUERY:
2327 if (chip->oldstate == FL_READY) {
2328 chip->oldstate = chip->state;
2329 chip->state = FL_PM_SUSPENDED;
2330 /* No need to wake_up() on this state change -
2331 * as the whole point is that nobody can do anything
2332 * with the chip now anyway.
2333 */
2334 } else {
2335 /* There seems to be an operation pending. We must wait for it. */
2336 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2337 ret = -EAGAIN;
2338 }
2339 break;
2340 default:
2341 /* Should we actually wait? Once upon a time these routines weren't
2342 allowed to. Or should we return -EAGAIN, because the upper layers
2343 ought to have already shut down anything which was using the device
2344 anyway? The latter for now. */
2345 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2346 ret = -EAGAIN;
2347 case FL_PM_SUSPENDED:
2348 break;
2349 }
2350 spin_unlock(chip->mutex);
2351 }
2352
2353 /* Unlock the chips again */
2354
2355 if (ret) {
2356 for (i--; i >=0; i--) {
2357 chip = &cfi->chips[i];
2358
2359 spin_lock(chip->mutex);
2360
2361 if (chip->state == FL_PM_SUSPENDED) {
2362 /* No need to force it into a known state here,
2363 because we're returning failure, and it didn't
2364 get power cycled */
2365 chip->state = chip->oldstate;
2366 chip->oldstate = FL_READY;
2367 wake_up(&chip->wq);
2368 }
2369 spin_unlock(chip->mutex);
2370 }
2371 }
2372
2373 return ret;
2374}
2375
2376static void cfi_intelext_resume(struct mtd_info *mtd)
2377{
2378 struct map_info *map = mtd->priv;
2379 struct cfi_private *cfi = map->fldrv_priv;
2380 int i;
2381 struct flchip *chip;
2382
2383 for (i=0; i<cfi->numchips; i++) {
2384
2385 chip = &cfi->chips[i];
2386
2387 spin_lock(chip->mutex);
2388
2389 /* Go to known state. Chip may have been power cycled */
2390 if (chip->state == FL_PM_SUSPENDED) {
2391 map_write(map, CMD(0xFF), cfi->chips[i].start);
2392 chip->oldstate = chip->state = FL_READY;
2393 wake_up(&chip->wq);
2394 }
2395
2396 spin_unlock(chip->mutex);
2397 }
2398}
2399
Nicolas Pitre963a6fb2005-04-01 02:59:56 +01002400static int cfi_intelext_reset(struct mtd_info *mtd)
2401{
2402 struct map_info *map = mtd->priv;
2403 struct cfi_private *cfi = map->fldrv_priv;
2404 int i, ret;
2405
2406 for (i=0; i < cfi->numchips; i++) {
2407 struct flchip *chip = &cfi->chips[i];
2408
2409 /* force the completion of any ongoing operation
2410 and switch to array mode so any bootloader in
2411 flash is accessible for soft reboot. */
2412 spin_lock(chip->mutex);
2413 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2414 if (!ret) {
2415 map_write(map, CMD(0xff), chip->start);
2416 chip->state = FL_READY;
2417 }
2418 spin_unlock(chip->mutex);
2419 }
2420
2421 return 0;
2422}
2423
2424static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2425 void *v)
2426{
2427 struct mtd_info *mtd;
2428
2429 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2430 cfi_intelext_reset(mtd);
2431 return NOTIFY_DONE;
2432}
2433
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434static void cfi_intelext_destroy(struct mtd_info *mtd)
2435{
2436 struct map_info *map = mtd->priv;
2437 struct cfi_private *cfi = map->fldrv_priv;
Nicolas Pitre963a6fb2005-04-01 02:59:56 +01002438 cfi_intelext_reset(mtd);
2439 unregister_reboot_notifier(&mtd->reboot_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 kfree(cfi->cmdset_priv);
2441 kfree(cfi->cfiq);
2442 kfree(cfi->chips[0].priv);
2443 kfree(cfi);
2444 kfree(mtd->eraseregions);
2445}
2446
Nicolas Pitre638d9832005-08-06 05:40:46 +01002447static char im_name_0001[] = "cfi_cmdset_0001";
2448static char im_name_0003[] = "cfi_cmdset_0003";
2449static char im_name_0200[] = "cfi_cmdset_0200";
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450
2451static int __init cfi_intelext_init(void)
2452{
Nicolas Pitre638d9832005-08-06 05:40:46 +01002453 inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
2454 inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
2455 inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 return 0;
2457}
2458
2459static void __exit cfi_intelext_exit(void)
2460{
Nicolas Pitre638d9832005-08-06 05:40:46 +01002461 inter_module_unregister(im_name_0001);
2462 inter_module_unregister(im_name_0003);
2463 inter_module_unregister(im_name_0200);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464}
2465
2466module_init(cfi_intelext_init);
2467module_exit(cfi_intelext_exit);
2468
2469MODULE_LICENSE("GPL");
2470MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2471MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");