/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.173 2005/03/30 23:57:30 tpoynor Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080	0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:           supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common, and the same seems to hold for the
	 * device IDs.  This table picks up all the cases where
	 * we know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
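
/*
 * For reference, a sketch of how a fixup pass over the tables above
 * might proceed.  The real cfi_fixup() lives in the generic CFI layer,
 * so this loop is illustrative only, based on the table entries used
 * in this file:
 *
 *	static void cfi_fixup_sketch(struct mtd_info *mtd, struct cfi_fixup *table)
 *	{
 *		struct map_info *map = mtd->priv;
 *		struct cfi_private *cfi = map->fldrv_priv;
 *		struct cfi_fixup *f;
 *
 *		for (f = table; f->fixup; f++) {
 *			if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *			    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *				f->fixup(mtd, f->param);
 *		}
 *	}
 *
 * i.e. every entry whose manufacturer/device pair matches (with ANY as
 * a wildcard) has its fixup hook run against the freshly probed chip.
 */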

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 6;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp_size < sizeof(*extp) + extra_size) {
 need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

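/*
 * A worked picture of the version 1.3 extended query layout that the
 * parser above walks (sizes inferred from the code itself, not from a
 * spec quote, so treat the byte counts as assumptions):
 *
 *	struct cfi_pri_intelext    fixed header, includes 1 OTP field
 *	(NumProtectionFields - 1)  further cfi_intelext_otpinfo records
 *	6 bytes                    burst read info
 *	1 byte                     number of hardware partition regions
 *	per region:                cfi_intelext_regioninfo, followed by
 *	                           (NumBlockTypes - 1) extra blockinfo
 *
 * Since the total size is only known once the variable-length tail has
 * been parsed, the function reads the table, re-reads it with a larger
 * buffer whenever a bounds check fails ("goto need_more"), and gives up
 * past an arbitrary 4 KiB sanity limit.
 */
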
/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

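/*
 * Note on the timeout initialisation above: the CFI query fields are
 * log2-encoded, with typical word-write timeout in microseconds and
 * typical block-erase timeout in milliseconds (stated here from memory
 * of the CFI spec; verify against the datasheet).  E.g. with assumed
 * query values WordWriteTimeoutTyp = 4 and BlockEraseTimeoutTyp = 10:
 *
 *	word_write_time = 1 << 4  =   16	(us per word)
 *	erase_time      = 1 << 10 = 1024	(ms per block)
 *
 * which is why do_erase_oneblock() later waits erase_time*1000/2, i.e.
 * half the typical erase time converted to microseconds, before it
 * starts polling the status register.
 */
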
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

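/*
 * The EraseRegionInfo decoding above follows the CFI packing: the high
 * 16 bits give the region's block size in 256-byte units and the low
 * 16 bits give the number of blocks minus one.  A hypothetical entry
 * for a region of 32 blocks of 128 KiB (on a non-interleaved map):
 *
 *	EraseRegionInfo[i] = 0x0200001f
 *	ersize = ((0x0200001f >> 8) & ~0xff) * 1 = 0x20000 = 128 KiB
 *	ernum  = (0x0200001f & 0xffff) + 1       = 32
 *
 * so the per-region offsets and the final "offset != devsize" sanity
 * check all come out in bytes.
 */
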
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += 6;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

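/*
 * A quick worked example of the partshift arithmetic above, with
 * assumed numbers: a 64 MiB chip has chipshift = 26; if the extended
 * table reports numparts = 4 hardware partitions, then
 *
 *	partshift      = 26 - __ffs(4) = 26 - 2 = 24
 *	1 << partshift = 16 MiB per virtual chip
 *
 * and numvirtchips = numchips * 4, each virtual chip starting at
 * chip->start + (j << partshift).  Note __ffs() only acts as log2 here
 * because numparts is expected to be a power of two, matching the
 * "all partitions are the same size" assumption stated above.
 */
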
/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 * Note that not only IRQs are disabled but the preemption count is also
 * increased to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */

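/*
 * For context (an assumption based on linux/mtd/xip.h, not something
 * this file defines): __xipram is expected to annotate a function so
 * that its text is kept in RAM rather than executed from the flash
 * being driven, e.g. roughly
 *
 *	#define __xipram	__attribute__((__section__(".data")))
 *
 * so the code below can keep running while the chip is out of array
 * mode and instruction fetches from flash would return garbage.
 */
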
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	preempt_disable();
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
	local_irq_enable();
	preempt_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			preempt_enable();
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			preempt_disable();
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				preempt_enable();
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				preempt_disable();
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
	do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

#define XIP_INVAL_CACHED_RANGE(x...)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		/* check the bounds before indexing into the chip array */
		if (chipnum >= cfi->numchips)
			break;
		chip = &cfi->chips[chipnum];

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:   write_cmd = CMD(0x40); break;
	case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
	default: return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	UDELAY(map, chip, adr, chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}

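/*
 * Aside on the 'z' bookkeeping above: word_write_time is adapted on the
 * fly.  If the very first status poll already shows ready (z == 0), the
 * initial delay was too long and gets decremented (clamped at 1); if we
 * had to poll more than once (z > 1) it was too short and grows.  A
 * purely illustrative run, assuming word_write_time starts at 16 us:
 * three consecutive writes that each complete during the initial delay
 * trim it to 13 us, while a later write needing two extra 1 us polls
 * pushes it back up to 14 us.
 */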

static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

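/*
 * The unaligned-head handling above in numbers (hypothetical values):
 * with map_bankwidth(map) == 4 and a write of 10 bytes to ofs 0x1002,
 *
 *	bus_ofs = 0x1002 & ~3 = 0x1000
 *	gap     = 2
 *	n       = min(10, 4 - 2) = 2
 *
 * so a single bus word is built with the untouched bytes left at 0xff
 * (programming a 1 bit leaves the stored contents unchanged on NOR
 * flash) and written at 0x1000; the remaining 8 bytes then go through
 * the aligned loop, with a partial-word tail if needed.
 */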

static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		spin_lock(chip->mutex);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - !bytes), cmd_adr );

	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		z++;
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), cmd_adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}

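/*
 * The word-count write above is easy to misread: the E8 command takes
 * "number of words to be written minus one".  With an assumed
 * map_bankwidth of 4 and len = 10:
 *
 *	bytes = 10 & 3 = 2
 *	words = 10 / 4 = 2
 *	CMD(words - !bytes) = CMD(2)	i.e. three bus words follow:
 *					two full ones plus the 0xff-padded
 *					partial tail.
 *
 * For an exact multiple (len = 8: bytes = 0, words = 2) the count is
 * CMD(1), two words.  The "- !bytes" is what folds both cases into one
 * expression.
 */
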
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}

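/*
 * Chunking example for the loop above (values assumed, not taken from
 * a datasheet): with interleave 1 and MaxBufWriteSize 5, wbufsize is
 * 1 << 5 = 32 bytes.  A 100-byte write starting at ofs 0x1010 is cut
 * at buffer boundaries as
 *
 *	0x1010: size = 32 - 0x10 = 16
 *	0x1020: 32,  0x1040: 32,  0x1060: 20
 *
 * so no single do_write_buffer() call ever crosses a 32-byte write
 * buffer boundary, which the chip's write-to-buffer command requires.
 */
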
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, adr, chip->erase_time*1000/2);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
			       adr, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1000000/HZ);
		spin_lock(chip->mutex);
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus;

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		chipstatus = MERGESTATUS(status);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				put_chip(map, chip, adr);
				spin_unlock(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus);
			ret = -EIO;
		}
	} else {
		xip_enable(map, chip, adr);
		ret = 0;
	}

 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

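/*
 * Status-register error decoding used above, for reference (bit names
 * per the Intel StrataFlash convention; this summary is editorial, so
 * double-check against the datasheet):
 *
 *	0x3a mask = SR.1 | SR.3 | SR.4 | SR.5	(any error source)
 *	SR.4 + SR.5 both set	improper command sequence -> -EIO
 *	SR.1 (0x02)		block locked              -> -EROFS
 *	SR.3 (0x08)		Vpp low                   -> -EIO
 *	SR.5 (0x20)		erase failed; retried up to
 *				3 times before giving up
 *
 * MERGESTATUS() folds the per-chip copies of an interleaved status
 * word into one value so these tests work for any interleave.
 */
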
int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	xip_disable(map, chip, adr+(2*ofs_factor));
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

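/*
 * Lock or unlock a single block, per the Intel command set: write the
 * 0x60 (lock setup) command to the block address, then 0x01 to set the
 * block lock bit or 0xD0 to clear it, and poll the status register for
 * completion.
 */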
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking is supported, there is no
	 * need to delay.
	 */
	if (!extp || !(extp->FeatureSupport & (1 << 5))) {
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1000000/HZ);
		spin_lock(chip->mutex);
	}

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "block lock/unlock timed out: status = %lx, Xstatus = %lx.\n",
			       status.x[0], Xstatus.x[0]);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, NULL);
#endif

	return ret;
}

#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

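/*
 * Read from the protection registers. These live in the chip's
 * identifier space, so we switch into JEDEC query mode (0x90) for the
 * copy, and invalidate any cache of that address range both before and
 * after so array-mode data and OTP data are never confused.
 */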
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	if (map->inval_cache)
		map->inval_cache(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	if (map->inval_cache)
		map->inval_cache(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
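		/* Program one bus-width-aligned word at a time. The word
		   is preloaded with all ones (map_word_ff) so the bytes
		   outside [gap, gap+n) stay unprogrammed - writing a 1
		   bit leaves a flash cell unchanged. */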
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

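	/* Locking is one-way: clear this group's bit in the protection
	   lock word. Programming can only turn 1 bits into 0s, so the
	   lock bit can never be written back to 1. */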
	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

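/*
 * Walk the OTP protection regions of every physical chip. With a NULL
 * action this fills *buf with otp_info records describing each group;
 * otherwise the action (read/write/lock) is applied to the byte range
 * [from, from+len) within the selected factory or user registers.
 */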
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;

	for (chip_num = 0; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
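			/* the extended query gives per-device offsets and
			   sizes; scale by the interleave and device type
			   to get map-relative byte values */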
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					/* len is a size_t: check before
					   subtracting so it cannot wrap */
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
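			/* fall through */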
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
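			/* 0xFF is the Intel read-array command: it returns
			   the chip to normal read mode whatever state the
			   power cycle left it in */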
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";

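/*
 * Command set 0x0003 chips use the same Intel extended command set,
 * so both inter_module names resolve to cfi_cmdset_0001().
 */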
static int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}

static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");