/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.172 2005/03/29 22:06:37 tpoynor Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:           supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
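	/* The low 16 bits of an EraseRegionInfo entry hold the number of
	   blocks minus one, the upper bits the block size; this pins
	   region 1 at 0x3e + 1 = 63 blocks without touching the size. */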
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common, and it looks as if the device IDs are as
	 * well.  This table picks up the cases where we know
	 * that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 6;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
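/*
 * A map driver normally reaches this through the generic CFI probe
 * rather than calling it directly.  Roughly (a sketch, assuming the
 * standard map-probe flow of this kernel generation; "my_map" is a
 * hypothetical map_info filled in by the board driver):
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *
 * The CFI probe reads the query structure and dispatches here when the
 * primary vendor command set ID is 0x0001.
 */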
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

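	/* The CFI timeout fields below are log2 values: word and buffer
	   write times are in microseconds, the block erase time in
	   milliseconds (hence the *1000 where it is used for erase). */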
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
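		/* Decode one CFI erase-region descriptor: block size in
		   the top 16 bits (in units of 256 bytes), block count
		   minus one in the bottom 16. */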
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point.  This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += 6;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);
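		/* e.g. chipshift 24 (a 16 MiB chip) with 4 hardware
		   partitions gives partshift 22, i.e. 4 MiB per virtual
		   chip.  This implicitly assumes numparts is a power of
		   two, in line with the same-geometry assumption above. */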

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
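	/* SR.7 (0x80) is the WSM ready bit; SR.0 (0x01) is treated below
	   as the partition write status on multi-partition chips.  CMD()
	   replicates the bit pattern across every interleaved chip. */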
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK.  We have the possibility of contention on the
		 * write/erase operations which are global to the real
		 * chip and not per partition.  So let's fight it over in
		 * the partition which currently has authority on the
		 * operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't?  The old code would leave the finished
		   one in READY mode.  That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * Within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring the MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 * Note that not only IRQs are disabled but the preemption count is also
 * increased to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	preempt_disable();
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
	local_irq_enable();
	preempt_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			preempt_enable();
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			preempt_disable();
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				preempt_enable();
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				preempt_disable();
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
	do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

#define XIP_INVAL_CACHED_RANGE(x...)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

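		/* Clamp this chunk at the chip boundary if the request
		   runs into the next chip; otherwise take all of it. */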
		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		if (chipnum >= cfi->numchips)
			break;
		chip = &cfi->chips[chipnum];

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "Warning: unpoint called on non-pointed region\n"); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:   write_cmd = CMD(0x40); break;
	case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
	default: return -EINVAL;
	}
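	/* 0x40 is the ordinary word-program command; 0xc0 targets the
	   protection (OTP) register instead, with the same status
	   polling sequence below. */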

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	UDELAY(map, chip, adr, chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
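	/* Write-buffer size in bytes: 2^MaxBufWriteSize per chip, scaled
	   up by the interleave factor. */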
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set.  Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		spin_lock(chip->mutex);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - !bytes), cmd_adr );
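	/* The buffer-program word count is encoded as N-1.  A trailing
	   partial word (bytes != 0) still occupies a full bus word, so
	   only subtract one when len is an exact multiple of the bus
	   width. */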

	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		z++;
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
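		/* (-ofs) & (bankwidth-1) is the distance from ofs up to
		   the next bus-aligned address. */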
		if (local_len > len)
			local_len = len;
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
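	/* erase_time holds the typical block erase time in milliseconds;
	   wait half of it (converted to µs) before the first status poll. */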
	UDELAY(map, chip, adr, chip->erase_time*1000/2);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
			       adr, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1000000/HZ);
		spin_lock(chip->mutex);
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus;

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		chipstatus = MERGESTATUS(status);
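		/* MERGESTATUS() folds the per-chip status bytes of an
		   interleaved map into one chip-status value, so the
		   error bits below only need testing once, whichever
		   chip raised them. */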
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708
1709 if ((chipstatus & 0x30) == 0x30) {
Thomas Gleixner50da7f62005-03-19 22:39:52 +00001710 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 ret = -EIO;
1712 } else if (chipstatus & 0x02) {
1713 /* Protection bit set */
1714 ret = -EROFS;
1715 } else if (chipstatus & 0x8) {
1716 /* Voltage */
Thomas Gleixner50da7f62005-03-19 22:39:52 +00001717 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 ret = -EIO;
1719 } else if (chipstatus & 0x20) {
1720 if (retries--) {
Thomas Gleixner50da7f62005-03-19 22:39:52 +00001721 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 timeo = jiffies + HZ;
1723 put_chip(map, chip, adr);
1724 spin_unlock(chip->mutex);
1725 goto retry;
1726 }
Thomas Gleixner50da7f62005-03-19 22:39:52 +00001727 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 ret = -EIO;
1729 }
1730 } else {
1731 xip_enable(map, chip, adr);
1732 ret = 0;
1733 }
1734
1735 out: put_chip(map, chip, adr);
1736 spin_unlock(chip->mutex);
1737 return ret;
1738}
1739
1740int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1741{
1742 unsigned long ofs, len;
1743 int ret;
1744
1745 ofs = instr->addr;
1746 len = instr->len;
1747
1748 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1749 if (ret)
1750 return ret;
1751
1752 instr->state = MTD_ERASE_DONE;
1753 mtd_erase_callback(instr);
1754
1755 return 0;
1756}
1757
1758static void cfi_intelext_sync (struct mtd_info *mtd)
1759{
1760 struct map_info *map = mtd->priv;
1761 struct cfi_private *cfi = map->fldrv_priv;
1762 int i;
1763 struct flchip *chip;
1764 int ret = 0;
1765
1766 for (i=0; !ret && i<cfi->numchips; i++) {
1767 chip = &cfi->chips[i];
1768
1769 spin_lock(chip->mutex);
1770 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1771
1772 if (!ret) {
1773 chip->oldstate = chip->state;
1774 chip->state = FL_SYNCING;
1775 /* No need to wake_up() on this state change -
1776 * as the whole point is that nobody can do anything
1777 * with the chip now anyway.
1778 */
1779 }
1780 spin_unlock(chip->mutex);
1781 }
1782
1783 /* Unlock the chips again */
1784
1785 for (i--; i >=0; i--) {
1786 chip = &cfi->chips[i];
1787
1788 spin_lock(chip->mutex);
1789
1790 if (chip->state == FL_SYNCING) {
1791 chip->state = chip->oldstate;
Nicolas Pitre09c79332005-03-16 22:41:09 +00001792 chip->oldstate = FL_READY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 wake_up(&chip->wq);
1794 }
1795 spin_unlock(chip->mutex);
1796 }
1797}
1798
1799#ifdef DEBUG_LOCK_BITS
1800static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1801 struct flchip *chip,
1802 unsigned long adr,
1803 int len, void *thunk)
1804{
1805 struct cfi_private *cfi = map->fldrv_priv;
1806 int status, ofs_factor = cfi->interleave * cfi->device_type;
1807
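	/* 0x90 selects read-identifier mode; in that mode the word at
	   block base + 2 (scaled by interleave and device width) is the
	   block lock status register, bit 0 being the lock bit */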
	xip_disable(map, chip, adr+(2*ofs_factor));
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

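	/* Block lock sequence per the Intel command set: 0x60 (lock
	   setup) followed by 0x01 sets the block's lock bit; 0x60
	   followed by 0xD0 clears the lock bits. */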
	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */

	if (!extp || !(extp->FeatureSupport & (1 << 5))) {
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1000000/HZ);
		spin_lock(chip->mutex);
	}

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for lock/unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
			       status.x[0], Xstatus.x[0]);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
	       __FUNCTION__, (unsigned long long)ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
	       __FUNCTION__, (unsigned long long)ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}

#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	if (map->inval_cache)
		map->inval_cache(map, chip->start + offset, size);

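	/* 0x90 (read identifier) maps the protection registers into the
	   address space so they can be copied out like ordinary memory */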
	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	if (map->inval_cache)
		map->inval_cache(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

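	/* OTP cells are programmed with the normal word-program command,
	   one bus word at a time: pad the word with 0xFF (a no-op when
	   programming flash), merge the caller's bytes in at the right
	   offset, and let do_write_oneword() drive the chip in
	   FL_OTP_WRITE state. */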
	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

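	/* a group is locked by programming its bit in the protection
	   lock register from 1 to 0, hence clearing bit grpno in an
	   otherwise all-ones word */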
	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;

	for (chip_num = 0; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

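		/* The extended-query fields above are expressed in device
		   words at device addresses; the fixup below scales them
		   by interleave and device width to get map offsets. */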
		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					/* len is a size_t; test before
					   subtracting so it cannot wrap */
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
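			/* fall through */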
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
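			/* 0xFF = read array: returns the chip to normal
			   read mode whatever state it was left in */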
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";

static int __init cfi_intelext_init(void)
{
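	/* command set 0x0003 (Intel Standard) is close enough to be
	   handled by the 0x0001 code, so both names register the same
	   entry point */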
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}

static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");