/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.176 2005/04/27 20:01:49 tpoynor Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	                    "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks up the cases where we know
	 * that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
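
/*
 * How these tables are consumed: the generic cfi_fixup() helper (in
 * drivers/mtd/chips/cfi_util.c, not this file) walks a table until the
 * { 0, 0, NULL, NULL } sentinel and invokes every entry whose
 * manufacturer/device IDs match the probed chip, with CFI_MFR_ANY and
 * CFI_ID_ANY acting as wildcards.  A hypothetical entry for one more
 * ST part (the device ID here is made up) would look like:
 *
 *	{ CFI_MFR_ST, 0x1234, fixup_st_m28w320ct, NULL },
 */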

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 6;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
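
/*
 * Layout of the version 1.3 extended query table as walked above
 * (offsets into extp->extra[], after the fixed cfi_pri_intelext
 * header):
 *
 *	OTP info:    (NumProtectionFields - 1) * sizeof(cfi_intelext_otpinfo)
 *	burst read:  6 bytes
 *	nb_parts:    1 byte, the number of partition regions
 *	per region:  cfi_intelext_regioninfo followed by
 *	             (NumBlockTypes - 1) * cfi_intelext_blockinfo
 *
 * Since the total size is only known once the table has been parsed,
 * the function re-reads it with a larger buffer ("goto again") until
 * everything fits, giving up beyond 4096 bytes.
 */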

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}
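
/*
 * Worked example of the erase-region decoding in cfi_intelext_setup()
 * (per the CFI spec, the low 16 bits of an EraseRegionInfo word hold
 * "number of blocks - 1" and the upper bits hold "block size / 256"):
 * a region word of 0x0100003e on a non-interleaved map gives
 *
 *	ersize = ((0x0100003e >> 8) & ~0xff) * 1 = 0x10000 = 64 KiB
 *	ernum  = (0x0100003e & 0xffff) + 1       = 63 blocks
 *
 * which matches the 0x3e (63-block) count that the M28W320CB fixup
 * earlier forces for that chip's second region.
 */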

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += 6;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
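
/*
 * Sanity check of the partshift arithmetic above with illustrative
 * numbers (not taken from any particular datasheet): a 16 MiB device
 * has chipshift = 24, so with 8 identical hardware partitions
 *
 *	partshift = 24 - __ffs(8) = 21
 *	1 << 21   = 2 MiB per virtual "chip"
 *
 * and two such physical chips yield numvirtchips = 2 * 8 = 16 flchip
 * structures sharing two flchip_shared arbitration locks, one per
 * physical chip.
 */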

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

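/*
 * Status Register bits as used by the functions below (standard Intel
 * command-set semantics, cross-checkable against the CMD() constants
 * in the code):
 *
 *	0x80  SR.7  Write State Machine ready ("status_OK")
 *	0x40  SR.6  Erase suspended
 *	0x20  SR.5  Erase error
 *	0x10  SR.4  Program error
 *	0x08  SR.3  Vpp low
 *	0x04  SR.2  Program suspended
 *	0x02  SR.1  Block locked
 *	0x01  SR.0  Partition write status ("status_PWS", multi-partition parts)
 */
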
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have a possibility for contention on the
		 * write/erase operations which are global to the real chip
		 * and not per partition.  So let's fight it over in the
		 * partition which currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
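
/*
 * Every chip access below follows the same bracketing pattern; a
 * condensed sketch (error handling omitted) of what the read, write
 * and erase paths actually do:
 *
 *	spin_lock(chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_WRITING);	// may sleep, or
 *							// suspend an erase
 *	if (!ret) {
 *		... issue commands, poll status ...
 *		put_chip(map, chip, adr);		// resume/wake others
 *	}
 *	spin_unlock(chip->mutex);
 *
 * get_chip() is what makes erase-suspend-on-write and the
 * multi-partition arbitration transparent to these callers.
 */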

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 * Note that not only IRQs are disabled but the preemption count is also
 * increased to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	preempt_disable();
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
	local_irq_enable();
	preempt_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			preempt_enable();
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			preempt_disable();
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				preempt_enable();
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				preempt_disable();
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
	do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

#define XIP_INVAL_CACHED_RANGE(x...)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "Warning: unpoint called on non-pointed region\n"); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}
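
/*
 * point()/unpoint() give callers a zero-copy window into the flash: on
 * a linear map the returned pointer is simply map->virt + from, valid
 * for as long as ref_point_counter keeps the chip parked in FL_POINT.
 * A sketch of the calling convention through the mtd API (this is how
 * users such as JFFS2 are expected to use it):
 *
 *	size_t retlen;
 *	u_char *ptr;
 *
 *	if (mtd->point && !mtd->point(mtd, ofs, len, &retlen, &ptr)) {
 *		... read up to retlen bytes directly at ptr ...
 *		mtd->unpoint(mtd, ptr, ofs, retlen);
 *	}
 */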

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:   write_cmd = CMD(0x40); break;
	case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
	default: return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	UDELAY(map, chip, adr, chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
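
/*
 * Bus-cycle sequence implemented by do_write_oneword() above, for
 * reference (0xc0 replaces 0x40 for OTP writes in FL_OTP_WRITE mode):
 *
 *	write 0x40 (Word Program)  -> adr
 *	write <datum>              -> adr
 *	poll until SR.7 (0x80) is set on every interleaved device
 *	if SR.1 (0x02) is set, the block was locked -> -EROFS
 *
 * The z counter gives a crude adaptive delay: chip->word_write_time is
 * decremented when the poll loop never ran and incremented when it ran
 * more than once.
 */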


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
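
/*
 * cfi_intelext_write_words() splits an arbitrary (to, len) request
 * into three phases, each funnelled through do_write_oneword(): an
 * unaligned head merged into a map_word padded with 0xff, a run of
 * full bus-width words, and an 0xff-padded unaligned tail.  The 0xff
 * padding is what makes the partial writes safe: programming a NOR
 * cell with 1 bits leaves it unchanged, so only the caller's bytes
 * can pull bits low.
 */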


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		spin_lock(chip->mutex);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - !bytes), cmd_adr );

	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		z++;
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
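
/*
 * Buffered programming sequence implemented above, condensed (all
 * command writes go to cmd_adr, the wbufsize-aligned block address):
 *
 *	issue 0xe8 (Write to Buffer) repeatedly until SR.7 reports the
 *	    buffer available (bounded at ~20 attempts)
 *	write <word count - 1>
 *	write the data words, 0xff-padding any tail bytes
 *	write 0xd0 (Confirm), then poll SR.7 for completion
 *
 * As with word writes, SR.1 (0x02) set afterwards means a locked block
 * and the write fails with -EROFS.
 */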

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, adr, chip->erase_time*1000/2);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
			       adr, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1000000/HZ);
		spin_lock(chip->mutex);
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus;

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		chipstatus = MERGESTATUS(status);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				put_chip(map, chip, adr);
				spin_unlock(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus);
			ret = -EIO;
		}
	} else {
		xip_enable(map, chip, adr);
		ret = 0;
	}

 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
1746
1747int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1748{
1749 unsigned long ofs, len;
1750 int ret;
1751
1752 ofs = instr->addr;
1753 len = instr->len;
1754
1755 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1756 if (ret)
1757 return ret;
1758
1759 instr->state = MTD_ERASE_DONE;
1760 mtd_erase_callback(instr);
1761
1762 return 0;
1763}
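
/*
 * A minimal usage sketch, not part of this driver: erasing a region
 * through the 2.6-era function-pointer MTD interface.  The helper name
 * erase_region() is hypothetical.  cfi_intelext_erase_varsize() above
 * returns only after the erase has finished (it sets MTD_ERASE_DONE and
 * fires the callback itself), so a synchronous caller can leave
 * instr.callback NULL and simply check the return value.
 */
#if 0	/* example only */
static int erase_region(struct mtd_info *mtd, u_int32_t ofs, u_int32_t len)
{
	struct erase_info instr;

	memset(&instr, 0, sizeof(instr));
	instr.mtd  = mtd;
	instr.addr = ofs;	/* must be erase-block aligned */
	instr.len  = len;	/* must be a whole number of blocks */

	return mtd->erase(mtd, &instr);
}
#endif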

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking is supported, there is
	 * no need to delay.
	 */

	if (!extp || !(extp->FeatureSupport & (1 << 5))) {
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1000000/HZ);
		spin_lock(chip->mutex);
	}

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK, still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "Timed out waiting for lock/unlock to complete: status = %lx, Xstatus = %lx.\n",
			       status.x[0], Xstatus.x[0]);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
	       __FUNCTION__, (unsigned long long)ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
	       __FUNCTION__, (unsigned long long)ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}
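
/*
 * A minimal usage sketch, not part of this driver: many Intel chips
 * power up with all blocks locked, so a client typically unlocks a
 * region before modifying it and re-locks it afterwards.  The helper
 * name write_unlocked() is hypothetical; mtd->lock/mtd->unlock may be
 * NULL when the chip has no locking support, hence the checks.
 */
#if 0	/* example only */
static int write_unlocked(struct mtd_info *mtd, loff_t ofs, size_t len,
			  const u_char *buf)
{
	size_t retlen;
	int ret;

	if (mtd->unlock) {
		ret = mtd->unlock(mtd, ofs, len);
		if (ret)
			return ret;
	}
	ret = mtd->write(mtd, ofs, len, &retlen, buf);
	if (!ret && mtd->lock)
		ret = mtd->lock(mtd, ofs, len);
	return ret;
}
#endif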

#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	if (map->inval_cache)
		map->inval_cache(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	if (map->inval_cache)
		map->inval_cache(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					/* len is a size_t; check before
					   subtracting so it can't wrap
					   below zero */
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}
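
/*
 * A minimal usage sketch, not part of this driver: enumerating the user
 * OTP groups and reading the register through the mtd_info hooks wired
 * up above.  The helper name dump_user_otp() is hypothetical.
 * get_user_prot_info() returns the number of bytes of otp_info records
 * it filled in (see the action == NULL case of the walk above).
 */
#if 0	/* example only */
static int dump_user_otp(struct mtd_info *mtd)
{
	struct otp_info info[4];
	u_char buf[16];
	size_t retlen;
	int n, ret;

	n = mtd->get_user_prot_info(mtd, info, sizeof(info));
	if (n <= 0)
		return n ? : -ENODATA;

	ret = mtd->read_user_prot_reg(mtd, 0, sizeof(buf), &retlen, buf);
	if (ret)
		return ret;

	printk(KERN_DEBUG "%zu OTP bytes read, first group %s\n",
	       retlen, info[0].locked ? "locked" : "unlocked");
	return 0;
}
#endif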

#endif

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}
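
/*
 * A minimal usage sketch, not part of this driver: a map or board
 * driver is expected to forward its power-management events through
 * the mtd_info suspend/resume hooks.  The helper name is hypothetical.
 * -EAGAIN from suspend means an operation was still in flight and the
 * suspend should be retried or abandoned.
 */
#if 0	/* example only */
static int mymap_suspend_flash(struct mtd_info *mtd)
{
	int ret = mtd->suspend ? mtd->suspend(mtd) : 0;

	if (ret == -EAGAIN)
		printk(KERN_WARNING "flash busy, deferring suspend\n");
	return ret;
}
#endif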

static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_READY;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
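
/*
 * The notifier itself is presumably hooked up at chip setup time via
 * register_reboot_notifier(&mtd->reboot_notifier); the matching
 * unregister call is visible in cfi_intelext_destroy() below.
 */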

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";

static int __init cfi_intelext_init(void)
{
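	/* The Intel Standard command set (0003) is driven by the same
	   code, so both names are registered against cfi_cmdset_0001. */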
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}

static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");