/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.165 2005/02/05 02:06:15 nico Exp $
 *
 *
 * 10/10/2000 Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL 0x0089
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define MANUFACTURER_ST 0x0020
#define M50LPW080 0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
//static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
//static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe   = NULL, /* Not usable directly */
        .destroy = cfi_intelext_destroy,
        .name    = "cfi_cmdset_0001",
        .module  = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
        printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        for (i=10; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk(" - Unknown Bit %X: supported\n", i);
        }

        printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk(" - Unknown Bit %X: supported\n", i);
        }

        printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk(" - Unknown Bit %X Active: yes\n",i);
        }

        printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        /* This is the Intel command set: cfi_pri_intelext, not cfi_pri_amdstd,
           is the structure that carries SuspendCmdSupport. */
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
               "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It seems likely that the device IDs
         * are as well.  This table picks up all the cases where
         * we know that is so.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

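        /*
         * For version 1.3 the extended query table is variable length:
         * read it with the default size first, work out the real size
         * from the protection/burst/partition fields, and re-read if
         * the first pass came up short.
         */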
 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 6;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

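        /*
         * The CFI query structure stores typical operation times as
         * log2 values: word and buffer write timeouts in microseconds,
         * block erase timeout in milliseconds.  Hence the 1 << ...
         * expansions below.
         */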
        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

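        /*
         * Each 32-bit CFI EraseRegionInfo entry encodes the region
         * geometry: bits 0-15 hold (number of blocks - 1) and bits
         * 16-31 hold the block size in units of 256 bytes.  The
         * shift/mask below therefore yields the block size in bytes,
         * which is then scaled by the interleave.
         */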
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#if 0
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        return mtd;

 setup_err:
        if(mtd) {
                if(mtd->eraseregions)
                        kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += 6;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
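                /*
                 * __ffs(numparts) is log2 of the partition count when it
                 * is a power of two (which the identical-geometry
                 * assumption above implies), so partshift ends up as the
                 * address width of a single hardware partition.
                 */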
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                       "--> %d partitions of %d KiB\n",
                       map->name, cfi->numchips, cfi->interleave,
                       newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

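/*
 * get_chip() waits until the chip is available for the operation named
 * by 'mode' (FL_READY, FL_POINT, FL_WRITING, FL_ERASING, ...),
 * suspending an in-progress erase when the feature table allows it.
 * It must be called with chip->mutex held; put_chip() undoes its
 * effects and resumes whatever was suspended.
 */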
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read when its lock is taken.
                 * However any writes to it can only be made when the current
                 * owner's lock is also held.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                if (contender && contender != chip)
                        spin_unlock(contender->mutex);
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
                                       status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }
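                /* Fall through: the chip is now ready for the caller. */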

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here. */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 * Note that not only IRQs are disabled but the preemption count is also
 * increased to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        preempt_disable();
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
        local_irq_enable();
        preempt_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and for pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate, newstate;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        preempt_enable();
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        preempt_disable();
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                preempt_enable();
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                preempt_disable();
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
        do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

#define XIP_INVAL_CACHED_RANGE(x...)

#endif

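/*
 * point()/unpoint() hand callers a direct pointer into the flash
 * mapping for reads.  That only works while the chip stays in array
 * (read) mode, so each pointed range holds its chip in FL_POINT, and
 * ref_point_counter tracks nested point() calls per chip.
 */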
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                /* Check the bounds before taking the chip pointer */
                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if(chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

#if 0
static int __xipram cfi_intelext_read_prot_reg (struct mtd_info *mtd,
                                                loff_t from, size_t len,
                                                size_t *retlen,
                                                u_char *buf,
                                                int base_offst, int reg_sz)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct flchip *chip;
        int ofs_factor = cfi->interleave * cfi->device_type;
        int count = len;
        int chip_num, offst;
        int ret;

        chip_num = ((unsigned int)from/reg_sz);
        offst = from - (reg_sz*chip_num)+base_offst;

        while (count) {
                /* Calculate which chip & protection register offset we need */

                if (chip_num >= cfi->numchips)
                        goto out;

                chip = &cfi->chips[chip_num];

                spin_lock(chip->mutex);
                ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
                if (ret) {
                        spin_unlock(chip->mutex);
                        return (len-count)?:ret;
                }

                xip_disable(map, chip, chip->start);

                if (chip->state != FL_JEDEC_QUERY) {
                        map_write(map, CMD(0x90), chip->start);
                        chip->state = FL_JEDEC_QUERY;
                }

                while (count && ((offst-base_offst) < reg_sz)) {
                        *buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
                        buf++;
                        offst++;
                        count--;
                }

                xip_enable(map, chip, chip->start);
                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                /* Move on to the next chip */
                chip_num++;
                offst = base_offst;
        }

 out:
        return len-count;
}

static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp=cfi->cmdset_priv;
        int base_offst,reg_sz;

        /* Check that we actually have some protection registers */
        if(!extp || !(extp->FeatureSupport&64)){
                printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
                return 0;
        }

        base_offst=(1<<extp->FactProtRegSize);
        reg_sz=(1<<extp->UserProtRegSize);

        return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp=cfi->cmdset_priv;
        int base_offst,reg_sz;

        /* Check that we actually have some protection registers */
        if(!extp || !(extp->FeatureSupport&64)){
                printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
                return 0;
        }

        base_offst=0;
        reg_sz=(1<<extp->FactProtRegSize);

        return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}
#endif

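/*
 * Status register bits used below, per the Intel/Sharp 0x0001 command
 * set: SR.7 = WSM ready, SR.6 = erase suspended, SR.5 = erase error,
 * SR.4 = program error, SR.3 = Vpp low, SR.2 = program suspended,
 * SR.1 = block locked, SR.0 = partition status.  CMD(0x80) replicates
 * the SR.7 test across every interleaved chip on the bus.
 */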
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
                                     unsigned long adr, map_word datum)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long timeo;
        int z, ret=0;

        adr += chip->start;

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
        map_write(map, CMD(0x40), adr);
        map_write(map, datum, adr);
        chip->state = FL_WRITING;

        spin_unlock(chip->mutex);
        INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
        UDELAY(map, chip, adr, chip->word_write_time);
        spin_lock(chip->mutex);

        timeo = jiffies + (HZ/2);
        z = 0;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        spin_lock(chip->mutex);
                        continue;
                }

                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        chip->state = FL_STATUS;
                        xip_enable(map, chip, adr);
                        printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
                        ret = -EIO;
                        goto out;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                z++;
                UDELAY(map, chip, adr, 1);
                spin_lock(chip->mutex);
        }
        if (!z) {
                chip->word_write_time--;
                if (!chip->word_write_time)
                        chip->word_write_time++;
        }
        if (z > 1)
                chip->word_write_time++;

        /* Done and happy. */
        chip->state = FL_STATUS;

        /* check for lock bit */
        if (map_word_bitsset(map, status, CMD(0x02))) {
                /* clear status */
                map_write(map, CMD(0x50), adr);
                /* put back into read status register mode */
                map_write(map, CMD(0x70), adr);
                ret = -EROFS;
        }

        xip_enable(map, chip, adr);
 out:   put_chip(map, chip, adr);
        spin_unlock(chip->mutex);

        return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to - (chipnum << cfi->chipshift);

        /* If it's not bus-aligned, do the first byte write */
        if (ofs & (map_bankwidth(map)-1)) {
                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
                int gap = ofs - bus_ofs;
                int n;
                map_word datum;

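                /*
                 * Pad the partial word with 0xff: programming can only
                 * clear bits, so all-ones lanes leave the neighbouring
                 * bytes in the word untouched.
                 */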
                n = min_t(int, len, map_bankwidth(map)-gap);
                datum = map_word_ff(map);
                datum = map_word_load_partial(map, datum, buf, gap, n);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       bus_ofs, datum);
                if (ret)
                        return ret;

                len -= n;
                ofs += n;
                buf += n;
                (*retlen) += n;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        while(len >= map_bankwidth(map)) {
                map_word datum = map_word_load(map, buf);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, datum);
                if (ret)
                        return ret;

                ofs += map_bankwidth(map);
                buf += map_bankwidth(map);
                (*retlen) += map_bankwidth(map);
                len -= map_bankwidth(map);

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        if (len & (map_bankwidth(map)-1)) {
                map_word datum;

                datum = map_word_ff(map);
                datum = map_word_load_partial(map, datum, buf, 0, len);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                       ofs, datum);
                if (ret)
                        return ret;

                (*retlen) += len;
        }

        return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
                                    unsigned long adr, const u_char *buf, int len)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long cmd_adr, timeo;
        int wbufsize, z, ret=0, bytes, words;

        wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
        adr += chip->start;
        cmd_adr = adr & ~(wbufsize-1);

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_adr, FL_WRITING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, len);
        ENABLE_VPP(map);
        xip_disable(map, chip, cmd_adr);

        /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
           [...], the device will not accept any more Write to Buffer commands".
           So we must check here and reset those bits if they're set. Otherwise
           we're just pissing in the wind */
        if (chip->state != FL_STATUS)
                map_write(map, CMD(0x70), cmd_adr);
        status = map_read(map, cmd_adr);
        if (map_word_bitsset(map, status, CMD(0x30))) {
                xip_enable(map, chip, cmd_adr);
                printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
                xip_disable(map, chip, cmd_adr);
                map_write(map, CMD(0x50), cmd_adr);
                map_write(map, CMD(0x70), cmd_adr);
        }

        chip->state = FL_WRITING_TO_BUFFER;

        z = 0;
        for (;;) {
                map_write(map, CMD(0xe8), cmd_adr);

                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                spin_unlock(chip->mutex);
                UDELAY(map, chip, cmd_adr, 1);
                spin_lock(chip->mutex);

                if (++z > 20) {
                        /* Argh. Not ready for write to buffer */
                        map_word Xstatus;
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        Xstatus = map_read(map, cmd_adr);
                        /* Odd. Clear status bits */
                        map_write(map, CMD(0x50), cmd_adr);
                        map_write(map, CMD(0x70), cmd_adr);
                        xip_enable(map, chip, cmd_adr);
                        printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
                               status.x[0], Xstatus.x[0]);
                        ret = -EIO;
                        goto out;
                }
        }

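        /*
         * The Write to Buffer command takes a word count of (N - 1).
         * When len is an exact multiple of the bus width, bytes is 0
         * and (words - !bytes) gives words - 1; otherwise a trailing
         * partial word is written after the loop below, so the count
         * stays at words to cover it.
         */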
        /* Write length of data to come */
        bytes = len & (map_bankwidth(map)-1);
        words = len / map_bankwidth(map);
        map_write(map, CMD(words - !bytes), cmd_adr );

        /* Write data */
        z = 0;
        while(z < words * map_bankwidth(map)) {
                map_word datum = map_word_load(map, buf);
                map_write(map, datum, adr+z);

                z += map_bankwidth(map);
                buf += map_bankwidth(map);
        }

        if (bytes) {
                map_word datum;

                datum = map_word_ff(map);
                datum = map_word_load_partial(map, datum, buf, 0, bytes);
                map_write(map, datum, adr+z);
        }

        /* GO GO GO */
        map_write(map, CMD(0xd0), cmd_adr);
        chip->state = FL_WRITING;

        spin_unlock(chip->mutex);
        INVALIDATE_CACHED_RANGE(map, adr, len);
        UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
        spin_lock(chip->mutex);

        timeo = jiffies + (HZ/2);
        z = 0;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        spin_lock(chip->mutex);
                        continue;
                }

                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        chip->state = FL_STATUS;
                        xip_enable(map, chip, cmd_adr);
                        printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
                        ret = -EIO;
                        goto out;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                UDELAY(map, chip, cmd_adr, 1);
                z++;
                spin_lock(chip->mutex);
        }
        if (!z) {
                chip->buffer_write_time--;
                if (!chip->buffer_write_time)
                        chip->buffer_write_time++;
        }
        if (z > 1)
                chip->buffer_write_time++;

        /* Done and happy. */
        chip->state = FL_STATUS;

        /* check for lock bit */
        if (map_word_bitsset(map, status, CMD(0x02))) {
                /* clear status */
                map_write(map, CMD(0x50), cmd_adr);
                /* put back into read status register mode */
                map_write(map, CMD(0x70), adr);
                ret = -EROFS;
        }

        xip_enable(map, chip, cmd_adr);
 out:   put_chip(map, chip, cmd_adr);
        spin_unlock(chip->mutex);
        return ret;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
                                       size_t len, size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
        int ret = 0;
        int chipnum;
        unsigned long ofs;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to - (chipnum << cfi->chipshift);

        /* If it's not bus-aligned, do the first word write */
        if (ofs & (map_bankwidth(map)-1)) {
                size_t local_len = (-ofs)&(map_bankwidth(map)-1);
                if (local_len > len)
                        local_len = len;
                ret = cfi_intelext_write_words(mtd, to, local_len,
                                               retlen, buf);
                if (ret)
                        return ret;
                ofs += local_len;
                buf += local_len;
                len -= local_len;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        while(len) {
                /* We must not cross write block boundaries */
                int size = wbufsize - (ofs & (wbufsize-1));

                if (size > len)
                        size = len;
                ret = do_write_buffer(map, &cfi->chips[chipnum],
                                      ofs, buf, size);
                if (ret)
                        return ret;

                ofs += size;
                buf += size;
                (*retlen) += size;
                len -= size;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }
        return 0;
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
                                      unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long timeo;
        int retries = 3;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        adr += chip->start;

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

 retry:
        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, FL_ERASING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, len);
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);

        /* Clear the status register first */
        map_write(map, CMD(0x50), adr);

        /* Now erase */
        map_write(map, CMD(0x20), adr);
        map_write(map, CMD(0xD0), adr);
        chip->state = FL_ERASING;
        chip->erase_suspended = 0;

        spin_unlock(chip->mutex);
        INVALIDATE_CACHED_RANGE(map, adr, len);
        UDELAY(map, chip, adr, chip->erase_time*1000/2);
        spin_lock(chip->mutex);

        /* FIXME. Use a timer to check this, and return immediately. */
        /* Once the state machine's known to be working I'll do that */

        timeo = jiffies + (HZ*20);
        for (;;) {
                if (chip->state != FL_ERASING) {
                        /* Someone's suspended the erase. Sleep */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                        continue;
                }
                if (chip->erase_suspended) {
                        /* This erase was suspended and resumed.
                           Adjust the timeout */
                        timeo = jiffies + (HZ*20); /* FIXME */
                        chip->erase_suspended = 0;
                }

                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        map_word Xstatus;
                        map_write(map, CMD(0x70), adr);
                        chip->state = FL_STATUS;
                        Xstatus = map_read(map, adr);
                        /* Clear status bits */
                        map_write(map, CMD(0x50), adr);
                        map_write(map, CMD(0x70), adr);
                        xip_enable(map, chip, adr);
                        printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
                               adr, status.x[0], Xstatus.x[0]);
                        ret = -EIO;
                        goto out;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                UDELAY(map, chip, adr, 1000000/HZ);
                spin_lock(chip->mutex);
        }

        /* We've broken this before. It doesn't hurt to be safe */
        map_write(map, CMD(0x70), adr);
        chip->state = FL_STATUS;
        status = map_read(map, adr);

        /* check for lock bit */
        if (map_word_bitsset(map, status, CMD(0x3a))) {
                unsigned char chipstatus;

                /* Reset the error bits */
                map_write(map, CMD(0x50), adr);
                map_write(map, CMD(0x70), adr);
                xip_enable(map, chip, adr);

                chipstatus = status.x[0];
                if (!map_word_equal(map, status, CMD(chipstatus))) {
                        int i, w;
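                        /*
                         * The interleaved chips disagree, so OR every
                         * chip's status byte into one merged value.
                         * Chip i's status sits 8 * device_type * i bits
                         * up in the word, hence the shift scaling with
                         * i below (the original shifted by a constant,
                         * merging the same lane repeatedly).
                         */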
                        for (w=0; w<map_words(map); w++) {
                                for (i = 0; i<cfi_interleave(cfi); i++) {
                                        chipstatus |= status.x[w] >> (cfi->device_type * 8 * i);
                                }
                        }
                        printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
                               status.x[0], chipstatus);
                }

                if ((chipstatus & 0x30) == 0x30) {
                        printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
                        ret = -EIO;
                } else if (chipstatus & 0x02) {
                        /* Protection bit set */
                        ret = -EROFS;
                } else if (chipstatus & 0x8) {
                        /* Voltage */
                        printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
                        ret = -EIO;
                } else if (chipstatus & 0x20) {
                        if (retries--) {
                                printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
                                timeo = jiffies + HZ;
                                put_chip(map, chip, adr);
                                spin_unlock(chip->mutex);
                                goto retry;
                        }
                        printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
                        ret = -EIO;
                }
        } else {
                xip_enable(map, chip, adr);
                ret = 0;
        }

 out:   put_chip(map, chip, adr);
        spin_unlock(chip->mutex);
        return ret;
}

static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
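
/*
 * Illustrative use from an MTD client (a sketch only -- this entry
 * point is reached through the mtd_info method table, never called
 * directly, and the callback name here is hypothetical):
 *
 *	struct erase_info ei = {
 *		.mtd      = mtd,
 *		.addr     = ofs,		(block-aligned offset)
 *		.len      = mtd->erasesize,
 *		.callback = my_erase_done,	(invoked on completion)
 *	};
 *	if (mtd->erase(mtd, &ei))
 *		(handle the -EIO / -EROFS results reported above)
 */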

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
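
/*
 * Note the unwind pattern above: if get_chip() fails partway through,
 * the release loop still walks back from the last chip visited, but
 * the FL_SYNCING check means only chips that were actually claimed
 * have their state restored and their wait queues woken.
 */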

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	xip_disable(map, chip, adr+(2*ofs_factor));
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
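
/*
 * These two values are opaque cookies: cfi_varsize_frob() passes its
 * thunk argument straight through to do_xxlock_oneblock(), which uses
 * it only to choose between the lock and unlock command sequences.
 */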

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

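	/*
	 * Lock and unlock share the 0x60 (set/clear lock bits setup)
	 * first cycle; the second cycle selects the action: 0x01 sets
	 * the block's lock bit, 0xD0 clears it.  Completion is polled
	 * via SR.7, exactly as for erase.
	 */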
	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	spin_unlock(chip->mutex);
	UDELAY(map, chip, adr, 1000000/HZ);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for lock/unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
			       status.x[0], Xstatus.x[0]);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
	       __FUNCTION__, (unsigned long long)ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
	       __FUNCTION__, (unsigned long long)ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}
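
/*
 * Illustrative client sequence (a sketch -- these entry points are
 * reached through the mtd_info method pointers, not called directly).
 * Many Intel parts power up with every block locked, so a client
 * typically unlocks a region before erasing or writing it:
 *
 *	if (mtd->unlock)
 *		mtd->unlock(mtd, ofs, mtd->erasesize);
 */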

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			/* fall through */
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
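
/*
 * Returning -EAGAIN above is deliberate: suspend is only honoured when
 * every chip is quiescent (plain read/status/query states with nothing
 * suspended underneath), and a partially completed suspend is unwound
 * before the error is returned, so the caller sees all-or-nothing and
 * can simply retry later.
 */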

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}
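
/*
 * The 0xFF written on resume is the "read array" command: whatever
 * state the part was left in (or reset to by a power cycle), this puts
 * it back into normal read mode before anyone touches it again.
 */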

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name_1[] = "cfi_cmdset_0001";
static char im_name_3[] = "cfi_cmdset_0003";

static int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}

static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}
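
/*
 * Both names intentionally resolve to the same probe function: command
 * set 0x0003 (Intel Standard) is close enough to the extended set that
 * cfi_cmdset_0001() services it as well.
 */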

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");