/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.182 2005/08/06 04:40:41 nico Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}
	printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
};

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks up all the cases where we
	 * know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
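
/*
 * Illustrative sketch (not part of the driver): cfi_fixup() in cfi_util.c
 * walks one of the tables above and, roughly speaking, does something like
 *
 *	for (f = table; f->fixup; f++)
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *			f->fixup(mtd, f->param);
 *
 * so an entry keyed with CFI_MFR_ANY/CFI_ID_ANY applies to every chip, while
 * something like { CFI_MFR_ST, 0x00ba, ... } only matches that exact
 * manufacturer/device pair.
 */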

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += (extp->MinorVersion < '4') ? 6 : 5;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
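
/*
 * Illustrative sketch (not part of the driver, names are hypothetical): a
 * board map driver does not call cfi_cmdset_0001() directly.  It usually
 * fills in a struct map_info and lets the generic CFI probe pick the right
 * command set, roughly like this:
 *
 *	static struct map_info my_map = {
 *		.name      = "myboard flash",
 *		.size      = 0x01000000,
 *		.bankwidth = 2,
 *		.phys      = 0x80000000,
 *	};
 *
 *	my_map.virt = ioremap(my_map.phys, my_map.size);
 *	simple_map_init(&my_map);
 *	mtd = do_map_probe("cfi_probe", &my_map);
 *
 * The CFI probe then dispatches on the primary vendor command set ID and,
 * for ID 0x0001, ends up in cfi_cmdset_0001() above.
 */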

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += (extp->MinorVersion < '4') ? 6 : 5;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags |= MTD_PROGRAM_REGIONS;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, MTD_PROGREGION_SIZE(mtd),
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. There is a possibility of contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
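
/*
 * Illustrative sketch (not part of the driver): every operation in this file
 * brackets its flash accesses with the pair above, following the pattern
 *
 *	spin_lock(chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_WRITING);	(or FL_READY, FL_ERASING, ...)
 *	if (ret) {
 *		spin_unlock(chip->mutex);
 *		return ret;
 *	}
 *	... issue commands, poll status ...
 *	put_chip(map, chip, adr);
 *	spin_unlock(chip->mutex);
 *
 * get_chip() may drop and retake chip->mutex while arbitrating with other
 * partitions or suspending an erase, so no chip state cached across the call
 * can be trusted; put_chip() resumes whatever was suspended and wakes waiters.
 */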

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remaining
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}
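
/*
 * Illustrative sketch (not part of the driver): a hypothetical MTD user of
 * the point/unpoint interface installed by fixup_use_point() would do
 * roughly the following:
 *
 *	u_char *p;
 *	size_t retlen;
 *
 *	if (mtd->point && !mtd->point(mtd, from, len, &retlen, &p)) {
 *		... read retlen bytes directly at p, no intermediate copy ...
 *		mtd->unpoint(mtd, p, from, retlen);
 *	} else {
 *		mtd->read(mtd, from, len, &retlen, buf);
 *	}
 *
 * point() only succeeds on directly mapped (map_is_linear) flash and pins the
 * chip(s) in FL_POINT via ref_point_counter until unpoint() drops it again.
 */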

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine those according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		z++;
		UDELAY(map, chip, adr, 1);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time = 1;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
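
/*
 * Illustrative sketch (not part of the driver): with a hypothetical bankwidth
 * of 4 bytes, a write of 10 bytes starting at offset 6 is split by the code
 * above as follows:
 *
 *	head: bus_ofs = 4, gap = 2, n = 2   so one padded word covering 4..7
 *	body: two full words at offsets 8 and 12
 *	tail: len is 0 after the body, so no padded tail word in this case
 *
 * Padding uses map_word_ff(), i.e. 0xff bytes, which leave the neighbouring
 * flash bytes unchanged since programming can only clear bits.
 */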
1445
1446
1447static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1448 unsigned long adr, const u_char *buf, int len)
1449{
1450 struct cfi_private *cfi = map->fldrv_priv;
Nicolas Pitre638d9832005-08-06 05:40:46 +01001451 map_word status, status_OK, write_cmd;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 unsigned long cmd_adr, timeo;
1453 int wbufsize, z, ret=0, bytes, words;
1454
1455 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1456 adr += chip->start;
1457 cmd_adr = adr & ~(wbufsize-1);
Nicolas Pitre638d9832005-08-06 05:40:46 +01001458
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 /* Let's determine this according to the interleave only once */
1460 status_OK = CMD(0x80);
Nicolas Pitre638d9832005-08-06 05:40:46 +01001461 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462
1463 spin_lock(chip->mutex);
1464 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1465 if (ret) {
1466 spin_unlock(chip->mutex);
1467 return ret;
1468 }
1469
1470 XIP_INVAL_CACHED_RANGE(map, adr, len);
1471 ENABLE_VPP(map);
1472 xip_disable(map, chip, cmd_adr);
1473
1474 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1475 [...], the device will not accept any more Write to Buffer commands".
1476 So we must check here and reset those bits if they're set. Otherwise
1477 we're just pissing in the wind */
1478 if (chip->state != FL_STATUS)
1479 map_write(map, CMD(0x70), cmd_adr);
1480 status = map_read(map, cmd_adr);
1481 if (map_word_bitsset(map, status, CMD(0x30))) {
1482 xip_enable(map, chip, cmd_adr);
1483 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1484 xip_disable(map, chip, cmd_adr);
1485 map_write(map, CMD(0x50), cmd_adr);
1486 map_write(map, CMD(0x70), cmd_adr);
1487 }
1488
1489 chip->state = FL_WRITING_TO_BUFFER;
1490
1491 z = 0;
1492 for (;;) {
Nicolas Pitre638d9832005-08-06 05:40:46 +01001493 map_write(map, write_cmd, cmd_adr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494
1495 status = map_read(map, cmd_adr);
1496 if (map_word_andequal(map, status, status_OK, status_OK))
1497 break;
1498
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 UDELAY(map, chip, cmd_adr, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500
1501 if (++z > 20) {
1502 /* Argh. Not ready for write to buffer */
1503 map_word Xstatus;
1504 map_write(map, CMD(0x70), cmd_adr);
1505 chip->state = FL_STATUS;
1506 Xstatus = map_read(map, cmd_adr);
1507 /* Odd. Clear status bits */
1508 map_write(map, CMD(0x50), cmd_adr);
1509 map_write(map, CMD(0x70), cmd_adr);
1510 xip_enable(map, chip, cmd_adr);
Nicolas Pitre48436532005-08-06 05:16:52 +01001511 printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1512 map->name, status.x[0], Xstatus.x[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 ret = -EIO;
1514 goto out;
1515 }
1516 }
1517
1518 /* Write length of data to come */
1519 bytes = len & (map_bankwidth(map)-1);
1520 words = len / map_bankwidth(map);
1521 map_write(map, CMD(words - !bytes), cmd_adr );
1522
1523 /* Write data */
1524 z = 0;
1525 while(z < words * map_bankwidth(map)) {
1526 map_word datum = map_word_load(map, buf);
1527 map_write(map, datum, adr+z);
1528
1529 z += map_bankwidth(map);
1530 buf += map_bankwidth(map);
1531 }
1532
1533 if (bytes) {
1534 map_word datum;
1535
1536 datum = map_word_ff(map);
1537 datum = map_word_load_partial(map, datum, buf, 0, bytes);
1538 map_write(map, datum, adr+z);
1539 }
1540
1541 /* GO GO GO */
1542 map_write(map, CMD(0xd0), cmd_adr);
1543 chip->state = FL_WRITING;
1544
Nicolas Pitre6da70122005-05-19 18:05:47 +01001545 INVALIDATE_CACHE_UDELAY(map, chip,
1546 cmd_adr, len,
1547 chip->buffer_write_time);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548
1549 timeo = jiffies + (HZ/2);
1550 z = 0;
1551 for (;;) {
1552 if (chip->state != FL_WRITING) {
1553 /* Someone's suspended the write. Sleep */
1554 DECLARE_WAITQUEUE(wait, current);
1555 set_current_state(TASK_UNINTERRUPTIBLE);
1556 add_wait_queue(&chip->wq, &wait);
1557 spin_unlock(chip->mutex);
1558 schedule();
1559 remove_wait_queue(&chip->wq, &wait);
1560 timeo = jiffies + (HZ / 2); /* FIXME */
1561 spin_lock(chip->mutex);
1562 continue;
1563 }
1564
1565 status = map_read(map, cmd_adr);
1566 if (map_word_andequal(map, status, status_OK, status_OK))
1567 break;
1568
1569 /* OK Still waiting */
1570 if (time_after(jiffies, timeo)) {
1571	map_write(map, CMD(0x70), cmd_adr);
1572	chip->state = FL_STATUS;
1573	xip_enable(map, chip, cmd_adr);
1574	printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1575	ret = -EIO;
1576	goto out;
1577	}
1578
1579	/* Latency issues. Drop the lock, wait a while and retry */
1580	z++;
1581	UDELAY(map, chip, cmd_adr, 1);
1582	}
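	/* Adapt the initial wait: if the operation completed before the first
	   extra poll, shorten buffer_write_time; if more than one extra poll
	   was needed, lengthen it. */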
1583 if (!z) {
1584 chip->buffer_write_time--;
1585 if (!chip->buffer_write_time)
1586	chip->buffer_write_time = 1;
1587	}
1588 if (z > 1)
1589 chip->buffer_write_time++;
1590
1591 /* Done and happy. */
1592 chip->state = FL_STATUS;
1593
1594	/* check for errors */
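	/* 0x1a masks SR.4 (program error), SR.3 (VPP low) and SR.1 (block
	   locked); any of them set means the buffered program failed. */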
1595 if (map_word_bitsset(map, status, CMD(0x1a))) {
1596 unsigned long chipstatus = MERGESTATUS(status);
1597
1598 /* reset status */
1599	map_write(map, CMD(0x50), cmd_adr);
1600	map_write(map, CMD(0x70), cmd_adr);
1601 xip_enable(map, chip, cmd_adr);
1602
1603 if (chipstatus & 0x02) {
1604 ret = -EROFS;
1605 } else if (chipstatus & 0x08) {
1606 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1607 ret = -EIO;
1608 } else {
1609 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1610 ret = -EINVAL;
1611 }
1612
1613 goto out;
1614	}
1615
1616 xip_enable(map, chip, cmd_adr);
1617 out: put_chip(map, chip, cmd_adr);
1618 spin_unlock(chip->mutex);
1619 return ret;
1620}
1621
1622static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1623 size_t len, size_t *retlen, const u_char *buf)
1624{
1625 struct map_info *map = mtd->priv;
1626 struct cfi_private *cfi = map->fldrv_priv;
1627 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1628 int ret = 0;
1629 int chipnum;
1630 unsigned long ofs;
1631
1632 *retlen = 0;
1633 if (!len)
1634 return 0;
1635
1636 chipnum = to >> cfi->chipshift;
1637 ofs = to - (chipnum << cfi->chipshift);
1638
1639 /* If it's not bus-aligned, do the first word write */
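	/* (-ofs) & (bankwidth-1) is the byte count up to the next bus-aligned
	   boundary; that unaligned head is handled by the word-write path. */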
1640 if (ofs & (map_bankwidth(map)-1)) {
1641 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1642 if (local_len > len)
1643 local_len = len;
1644 ret = cfi_intelext_write_words(mtd, to, local_len,
1645 retlen, buf);
1646 if (ret)
1647 return ret;
1648 ofs += local_len;
1649 buf += local_len;
1650 len -= local_len;
1651
1652 if (ofs >> cfi->chipshift) {
1653 chipnum ++;
1654 ofs = 0;
1655 if (chipnum == cfi->numchips)
1656 return 0;
1657 }
1658 }
1659
1660 while(len) {
1661 /* We must not cross write block boundaries */
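		/* i.e. never write more than the remainder of the current write
		   buffer; wbufsize was computed above from MaxBufWriteSize. */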
1662 int size = wbufsize - (ofs & (wbufsize-1));
1663
1664 if (size > len)
1665 size = len;
1666 ret = do_write_buffer(map, &cfi->chips[chipnum],
1667 ofs, buf, size);
1668 if (ret)
1669 return ret;
1670
1671 ofs += size;
1672 buf += size;
1673 (*retlen) += size;
1674 len -= size;
1675
1676 if (ofs >> cfi->chipshift) {
1677 chipnum ++;
1678 ofs = 0;
1679 if (chipnum == cfi->numchips)
1680 return 0;
1681 }
1682 }
1683 return 0;
1684}
1685
1686static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1687 unsigned long adr, int len, void *thunk)
1688{
1689 struct cfi_private *cfi = map->fldrv_priv;
1690 map_word status, status_OK;
1691 unsigned long timeo;
1692 int retries = 3;
1693 DECLARE_WAITQUEUE(wait, current);
1694 int ret = 0;
1695
1696 adr += chip->start;
1697
1698 /* Let's determine this according to the interleave only once */
1699 status_OK = CMD(0x80);
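	/* 0x80 is SR.7 (write state machine ready), replicated across every
	   interleaved chip by CMD(). */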
1700
1701 retry:
1702 spin_lock(chip->mutex);
1703 ret = get_chip(map, chip, adr, FL_ERASING);
1704 if (ret) {
1705 spin_unlock(chip->mutex);
1706 return ret;
1707 }
1708
1709 XIP_INVAL_CACHED_RANGE(map, adr, len);
1710 ENABLE_VPP(map);
1711 xip_disable(map, chip, adr);
1712
1713 /* Clear the status register first */
1714 map_write(map, CMD(0x50), adr);
1715
1716 /* Now erase */
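	/* 0x20 + 0xD0 is the two-cycle block-erase setup/confirm sequence. */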
1717 map_write(map, CMD(0x20), adr);
1718 map_write(map, CMD(0xD0), adr);
1719 chip->state = FL_ERASING;
1720 chip->erase_suspended = 0;
1721
1722	INVALIDATE_CACHE_UDELAY(map, chip,
1723	adr, len,
1724	chip->erase_time*1000/2);
1725
1726 /* FIXME. Use a timer to check this, and return immediately. */
1727 /* Once the state machine's known to be working I'll do that */
1728
1729 timeo = jiffies + (HZ*20);
1730 for (;;) {
1731 if (chip->state != FL_ERASING) {
1732 /* Someone's suspended the erase. Sleep */
1733 set_current_state(TASK_UNINTERRUPTIBLE);
1734 add_wait_queue(&chip->wq, &wait);
1735 spin_unlock(chip->mutex);
1736 schedule();
1737 remove_wait_queue(&chip->wq, &wait);
1738 spin_lock(chip->mutex);
1739 continue;
1740 }
1741 if (chip->erase_suspended) {
1742 /* This erase was suspended and resumed.
1743 Adjust the timeout */
1744 timeo = jiffies + (HZ*20); /* FIXME */
1745 chip->erase_suspended = 0;
1746 }
1747
1748 status = map_read(map, adr);
1749 if (map_word_andequal(map, status, status_OK, status_OK))
1750 break;
1751
1752 /* OK Still waiting */
1753 if (time_after(jiffies, timeo)) {
1754	map_write(map, CMD(0x70), adr);
1755	chip->state = FL_STATUS;
1756	xip_enable(map, chip, adr);
1757	printk(KERN_ERR "%s: block erase error (status timeout)\n", map->name);
1758	ret = -EIO;
1759	goto out;
1760	}
1761
1762	/* Latency issues. Drop the lock, wait a while and retry */
1763	UDELAY(map, chip, adr, 1000000/HZ);
1764	}
1765
1766 /* We've broken this before. It doesn't hurt to be safe */
1767 map_write(map, CMD(0x70), adr);
1768 chip->state = FL_STATUS;
1769 status = map_read(map, adr);
1770
1771	/* check for errors */
1772	if (map_word_bitsset(map, status, CMD(0x3a))) {
1773	unsigned long chipstatus = MERGESTATUS(status);
1774
1775 /* Reset the error bits */
1776 map_write(map, CMD(0x50), adr);
1777 map_write(map, CMD(0x70), adr);
1778 xip_enable(map, chip, adr);
1779
1780	if ((chipstatus & 0x30) == 0x30) {
1781	printk(KERN_ERR "%s: block erase error (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1782	ret = -EINVAL;
1783	} else if (chipstatus & 0x02) {
1784 /* Protection bit set */
1785 ret = -EROFS;
1786 } else if (chipstatus & 0x8) {
1787 /* Voltage */
1788	printk(KERN_ERR "%s: block erase error (bad VPP)\n", map->name);
1789	ret = -EIO;
1790	} else if (chipstatus & 0x20 && retries--) {
1791	printk(KERN_DEBUG "%s: block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", map->name, adr, chipstatus);
1792 timeo = jiffies + HZ;
1793 put_chip(map, chip, adr);
1794 spin_unlock(chip->mutex);
1795 goto retry;
1796 } else {
1797 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1798	ret = -EIO;
1799	}
1800
1801	goto out;
1802	}
1803
1804	xip_enable(map, chip, adr);
1805	out:	put_chip(map, chip, adr);
1806 spin_unlock(chip->mutex);
1807 return ret;
1808}
1809
1810int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1811{
1812 unsigned long ofs, len;
1813 int ret;
1814
1815 ofs = instr->addr;
1816 len = instr->len;
1817
1818 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1819 if (ret)
1820 return ret;
1821
1822 instr->state = MTD_ERASE_DONE;
1823 mtd_erase_callback(instr);
1824
1825 return 0;
1826}
1827
1828static void cfi_intelext_sync (struct mtd_info *mtd)
1829{
1830 struct map_info *map = mtd->priv;
1831 struct cfi_private *cfi = map->fldrv_priv;
1832 int i;
1833 struct flchip *chip;
1834 int ret = 0;
1835
1836 for (i=0; !ret && i<cfi->numchips; i++) {
1837 chip = &cfi->chips[i];
1838
1839 spin_lock(chip->mutex);
1840 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1841
1842 if (!ret) {
1843 chip->oldstate = chip->state;
1844 chip->state = FL_SYNCING;
1845 /* No need to wake_up() on this state change -
1846 * as the whole point is that nobody can do anything
1847 * with the chip now anyway.
1848 */
1849 }
1850 spin_unlock(chip->mutex);
1851 }
1852
1853 /* Unlock the chips again */
1854
1855 for (i--; i >=0; i--) {
1856 chip = &cfi->chips[i];
1857
1858 spin_lock(chip->mutex);
1859
1860 if (chip->state == FL_SYNCING) {
1861 chip->state = chip->oldstate;
1862	chip->oldstate = FL_READY;
1863	wake_up(&chip->wq);
1864 }
1865 spin_unlock(chip->mutex);
1866 }
1867}
1868
1869#ifdef DEBUG_LOCK_BITS
1870static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1871 struct flchip *chip,
1872 unsigned long adr,
1873 int len, void *thunk)
1874{
1875 struct cfi_private *cfi = map->fldrv_priv;
1876 int status, ofs_factor = cfi->interleave * cfi->device_type;
1877
1878	adr += chip->start;
1879	xip_disable(map, chip, adr+(2*ofs_factor));
1880	map_write(map, CMD(0x90), adr+(2*ofs_factor));
1881	chip->state = FL_JEDEC_QUERY;
1882 status = cfi_read_query(map, adr+(2*ofs_factor));
1883 xip_enable(map, chip, 0);
1884 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1885 adr, status);
1886 return 0;
1887}
1888#endif
1889
1890#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1891#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
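/* Passed as the 'thunk' argument of cfi_varsize_frob() so that
   do_xxlock_oneblock() below knows whether to lock or unlock the block. */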
1892
1893static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1894 unsigned long adr, int len, void *thunk)
1895{
1896 struct cfi_private *cfi = map->fldrv_priv;
1897	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1898	map_word status, status_OK;
1899 unsigned long timeo = jiffies + HZ;
1900 int ret;
1901
1902 adr += chip->start;
1903
1904 /* Let's determine this according to the interleave only once */
1905 status_OK = CMD(0x80);
1906
1907 spin_lock(chip->mutex);
1908 ret = get_chip(map, chip, adr, FL_LOCKING);
1909 if (ret) {
1910 spin_unlock(chip->mutex);
1911 return ret;
1912 }
1913
1914 ENABLE_VPP(map);
1915 xip_disable(map, chip, adr);
1916
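	/* 0x60 is the block lock set-up command; 0x01 confirms "set block
	   lock bit" and 0xD0 confirms "clear block lock bits". */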
1917 map_write(map, CMD(0x60), adr);
1918 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1919 map_write(map, CMD(0x01), adr);
1920 chip->state = FL_LOCKING;
1921 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1922 map_write(map, CMD(0xD0), adr);
1923 chip->state = FL_UNLOCKING;
1924 } else
1925 BUG();
1926
1927	/*
1928	 * If Instant Individual Block Locking is supported, there is no
1929	 * need to delay.
1930	 */
1931
1932	if (!extp || !(extp->FeatureSupport & (1 << 5)))
1933	UDELAY(map, chip, adr, 1000000/HZ);
1934
1935 /* FIXME. Use a timer to check this, and return immediately. */
1936 /* Once the state machine's known to be working I'll do that */
1937
1938 timeo = jiffies + (HZ*20);
1939 for (;;) {
1940
1941 status = map_read(map, adr);
1942 if (map_word_andequal(map, status, status_OK, status_OK))
1943 break;
1944
1945 /* OK Still waiting */
1946 if (time_after(jiffies, timeo)) {
1947	map_write(map, CMD(0x70), adr);
1948	chip->state = FL_STATUS;
1949	xip_enable(map, chip, adr);
1950	printk(KERN_ERR "%s: block lock/unlock error (status timeout)\n", map->name);
1951	put_chip(map, chip, adr);
1952 spin_unlock(chip->mutex);
1953 return -EIO;
1954 }
1955
1956 /* Latency issues. Drop the lock, wait a while and retry */
1957	UDELAY(map, chip, adr, 1);
1958	}
1959
1960 /* Done and happy. */
1961 chip->state = FL_STATUS;
1962 xip_enable(map, chip, adr);
1963 put_chip(map, chip, adr);
1964 spin_unlock(chip->mutex);
1965 return 0;
1966}
1967
1968static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1969{
1970 int ret;
1971
1972#ifdef DEBUG_LOCK_BITS
1973 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1974 __FUNCTION__, ofs, len);
1975 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1976 ofs, len, 0);
1977#endif
1978
1979 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1980 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1981
1982#ifdef DEBUG_LOCK_BITS
1983 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1984 __FUNCTION__, ret);
1985 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1986 ofs, len, 0);
1987#endif
1988
1989 return ret;
1990}
1991
1992static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1993{
1994 int ret;
1995
1996#ifdef DEBUG_LOCK_BITS
1997 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1998 __FUNCTION__, ofs, len);
1999 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2000 ofs, len, 0);
2001#endif
2002
2003 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2004 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2005
2006#ifdef DEBUG_LOCK_BITS
2007 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2008 __FUNCTION__, ret);
2009 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2010 ofs, len, 0);
2011#endif
2012
2013 return ret;
2014}
2015
2016	#ifdef CONFIG_MTD_OTP
2017
2018typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2019 u_long data_offset, u_char *buf, u_int size,
2020 u_long prot_offset, u_int groupno, u_int groupsize);
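/* Every OTP accessor below funnels through cfi_intelext_otp_walk(), which
   walks the protection register regions described in the extended query
   table and applies one of these operations (or none, to merely enumerate
   the regions into otp_info records). */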
2021
2022static int __xipram
2023do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2024 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2025{
2026 struct cfi_private *cfi = map->fldrv_priv;
2027 int ret;
2028
2029 spin_lock(chip->mutex);
2030 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2031 if (ret) {
2032 spin_unlock(chip->mutex);
2033 return ret;
2034 }
2035
2036 /* let's ensure we're not reading back cached data from array mode */
2037	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2038
2039 xip_disable(map, chip, chip->start);
2040 if (chip->state != FL_JEDEC_QUERY) {
2041 map_write(map, CMD(0x90), chip->start);
2042 chip->state = FL_JEDEC_QUERY;
2043 }
2044 map_copy_from(map, buf, chip->start + offset, size);
2045 xip_enable(map, chip, chip->start);
2046
2047 /* then ensure we don't keep OTP data in the cache */
2048	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2049
2050 put_chip(map, chip, chip->start);
2051 spin_unlock(chip->mutex);
2052 return 0;
2053}
2054
2055static int
2056do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2057 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2058{
2059 int ret;
2060
2061 while (size) {
2062 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2063 int gap = offset - bus_ofs;
2064 int n = min_t(int, size, map_bankwidth(map)-gap);
2065 map_word datum = map_word_ff(map);
2066
2067 datum = map_word_load_partial(map, datum, buf, gap, n);
2068 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2069 if (ret)
2070 return ret;
2071
2072 offset += n;
2073 buf += n;
2074 size -= n;
2075 }
2076
2077 return 0;
2078}
2079
2080static int
2081do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2082 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2083{
2084 struct cfi_private *cfi = map->fldrv_priv;
2085 map_word datum;
2086
2087 /* make sure area matches group boundaries */
2088	if (size != grpsz)
2089	return -EXDEV;
2090
2091 datum = map_word_ff(map);
2092 datum = map_word_clr(map, datum, CMD(1 << grpno));
2093 return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2094}
2095
2096static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2097 size_t *retlen, u_char *buf,
2098 otp_op_t action, int user_regs)
2099{
2100 struct map_info *map = mtd->priv;
2101 struct cfi_private *cfi = map->fldrv_priv;
2102 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2103 struct flchip *chip;
2104 struct cfi_intelext_otpinfo *otp;
2105 u_long devsize, reg_prot_offset, data_offset;
2106 u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2107 u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2108 int ret;
2109
2110 *retlen = 0;
2111
2112 /* Check that we actually have some OTP registers */
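	/* bit 6 (0x40) of FeatureSupport advertises protection registers */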
2113 if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2114 return -ENODATA;
2115
2116 /* we need real chips here not virtual ones */
2117 devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2118 chip_step = devsize >> cfi->chipshift;
2119	chip_num = 0;
2120
2121	/* Some chips have OTP located in the _top_ partition only.
2122 For example: Intel 28F256L18T (T means top-parameter device) */
2123 if (cfi->mfr == MANUFACTURER_INTEL) {
2124 switch (cfi->id) {
2125 case 0x880b:
2126 case 0x880c:
2127 case 0x880d:
2128 chip_num = chip_step - 1;
2129 }
2130 }
2131
2132 for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2133	chip = &cfi->chips[chip_num];
2134 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2135
2136 /* first OTP region */
2137 field = 0;
2138 reg_prot_offset = extp->ProtRegAddr;
2139 reg_fact_groups = 1;
2140 reg_fact_size = 1 << extp->FactProtRegSize;
2141 reg_user_groups = 1;
2142 reg_user_size = 1 << extp->UserProtRegSize;
2143
2144 while (len > 0) {
2145 /* flash geometry fixup */
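			/* The extended query values are per-chip quantities:
			   scale the register address by interleave * device_type
			   and the region sizes by interleave to get byte offsets
			   and sizes within the map. */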
2146 data_offset = reg_prot_offset + 1;
2147 data_offset *= cfi->interleave * cfi->device_type;
2148 reg_prot_offset *= cfi->interleave * cfi->device_type;
2149 reg_fact_size *= cfi->interleave;
2150 reg_user_size *= cfi->interleave;
2151
2152 if (user_regs) {
2153 groups = reg_user_groups;
2154 groupsize = reg_user_size;
2155 /* skip over factory reg area */
2156 groupno = reg_fact_groups;
2157 data_offset += reg_fact_groups * reg_fact_size;
2158 } else {
2159 groups = reg_fact_groups;
2160 groupsize = reg_fact_size;
2161 groupno = 0;
2162 }
2163
2164	while (len > 0 && groups > 0) {
2165	if (!action) {
2166 /*
2167 * Special case: if action is NULL
2168 * we fill buf with otp_info records.
2169 */
2170 struct otp_info *otpinfo;
2171 map_word lockword;
2172 len -= sizeof(struct otp_info);
2173 if (len <= 0)
2174 return -ENOSPC;
2175 ret = do_otp_read(map, chip,
2176 reg_prot_offset,
2177 (u_char *)&lockword,
2178 map_bankwidth(map),
2179 0, 0, 0);
2180 if (ret)
2181 return ret;
2182 otpinfo = (struct otp_info *)buf;
2183 otpinfo->start = from;
2184 otpinfo->length = groupsize;
2185 otpinfo->locked =
2186 !map_word_bitsset(map, lockword,
2187 CMD(1 << groupno));
2188 from += groupsize;
2189 buf += sizeof(*otpinfo);
2190 *retlen += sizeof(*otpinfo);
2191 } else if (from >= groupsize) {
2192 from -= groupsize;
2193	data_offset += groupsize;
2194	} else {
2195 int size = groupsize;
2196 data_offset += from;
2197 size -= from;
2198 from = 0;
2199 if (size > len)
2200 size = len;
2201 ret = action(map, chip, data_offset,
2202 buf, size, reg_prot_offset,
2203 groupno, groupsize);
2204 if (ret < 0)
2205 return ret;
2206 buf += size;
2207 len -= size;
2208 *retlen += size;
2209	data_offset += size;
2210	}
2211 groupno++;
2212 groups--;
2213 }
2214
2215 /* next OTP region */
2216 if (++field == extp->NumProtectionFields)
2217 break;
2218 reg_prot_offset = otp->ProtRegAddr;
2219 reg_fact_groups = otp->FactGroups;
2220 reg_fact_size = 1 << otp->FactProtRegSize;
2221 reg_user_groups = otp->UserGroups;
2222 reg_user_size = 1 << otp->UserProtRegSize;
2223 otp++;
2224 }
2225 }
2226
2227 return 0;
2228}
2229
2230static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2231 size_t len, size_t *retlen,
2232 u_char *buf)
2233{
2234 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2235 buf, do_otp_read, 0);
2236}
2237
2238static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2239 size_t len, size_t *retlen,
2240 u_char *buf)
2241{
2242 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2243 buf, do_otp_read, 1);
2244}
2245
2246static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2247 size_t len, size_t *retlen,
2248 u_char *buf)
2249{
2250 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2251 buf, do_otp_write, 1);
2252}
2253
2254static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2255 loff_t from, size_t len)
2256{
2257 size_t retlen;
2258 return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2259 NULL, do_otp_lock, 1);
2260}
2261
2262static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2263 struct otp_info *buf, size_t len)
2264{
2265 size_t retlen;
2266 int ret;
2267
2268 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2269 return ret ? : retlen;
2270}
2271
2272static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2273 struct otp_info *buf, size_t len)
2274{
2275 size_t retlen;
2276 int ret;
2277
2278 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2279 return ret ? : retlen;
2280}
2281
2282#endif
2283
2284	static int cfi_intelext_suspend(struct mtd_info *mtd)
2285{
2286 struct map_info *map = mtd->priv;
2287 struct cfi_private *cfi = map->fldrv_priv;
2288 int i;
2289 struct flchip *chip;
2290 int ret = 0;
2291
2292 for (i=0; !ret && i<cfi->numchips; i++) {
2293 chip = &cfi->chips[i];
2294
2295 spin_lock(chip->mutex);
2296
2297 switch (chip->state) {
2298 case FL_READY:
2299 case FL_STATUS:
2300 case FL_CFI_QUERY:
2301 case FL_JEDEC_QUERY:
2302 if (chip->oldstate == FL_READY) {
2303 chip->oldstate = chip->state;
2304 chip->state = FL_PM_SUSPENDED;
2305 /* No need to wake_up() on this state change -
2306 * as the whole point is that nobody can do anything
2307 * with the chip now anyway.
2308 */
2309 } else {
2310 /* There seems to be an operation pending. We must wait for it. */
2311 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2312 ret = -EAGAIN;
2313 }
2314 break;
2315 default:
2316 /* Should we actually wait? Once upon a time these routines weren't
2317 allowed to. Or should we return -EAGAIN, because the upper layers
2318 ought to have already shut down anything which was using the device
2319 anyway? The latter for now. */
2320	printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2321 ret = -EAGAIN;
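			/* fall through */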
2322 case FL_PM_SUSPENDED:
2323 break;
2324 }
2325 spin_unlock(chip->mutex);
2326 }
2327
2328 /* Unlock the chips again */
2329
2330 if (ret) {
2331 for (i--; i >=0; i--) {
2332 chip = &cfi->chips[i];
2333
2334 spin_lock(chip->mutex);
2335
2336 if (chip->state == FL_PM_SUSPENDED) {
2337 /* No need to force it into a known state here,
2338 because we're returning failure, and it didn't
2339 get power cycled */
2340 chip->state = chip->oldstate;
2341 chip->oldstate = FL_READY;
2342 wake_up(&chip->wq);
2343 }
2344 spin_unlock(chip->mutex);
2345 }
2346 }
2347
2348 return ret;
2349}
2350
2351static void cfi_intelext_resume(struct mtd_info *mtd)
2352{
2353 struct map_info *map = mtd->priv;
2354 struct cfi_private *cfi = map->fldrv_priv;
2355 int i;
2356 struct flchip *chip;
2357
2358 for (i=0; i<cfi->numchips; i++) {
2359
2360 chip = &cfi->chips[i];
2361
2362 spin_lock(chip->mutex);
2363
2364 /* Go to known state. Chip may have been power cycled */
2365 if (chip->state == FL_PM_SUSPENDED) {
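			/* 0xFF puts the (possibly power-cycled) chip back into
			   read array mode before it is marked ready. */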
2366 map_write(map, CMD(0xFF), cfi->chips[i].start);
2367 chip->oldstate = chip->state = FL_READY;
2368 wake_up(&chip->wq);
2369 }
2370
2371 spin_unlock(chip->mutex);
2372 }
2373}
2374
2375	static int cfi_intelext_reset(struct mtd_info *mtd)
2376{
2377 struct map_info *map = mtd->priv;
2378 struct cfi_private *cfi = map->fldrv_priv;
2379 int i, ret;
2380
2381 for (i=0; i < cfi->numchips; i++) {
2382 struct flchip *chip = &cfi->chips[i];
2383
2384 /* force the completion of any ongoing operation
2385 and switch to array mode so any bootloader in
2386 flash is accessible for soft reboot. */
2387 spin_lock(chip->mutex);
2388 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2389 if (!ret) {
2390 map_write(map, CMD(0xff), chip->start);
2391 chip->state = FL_READY;
2392 }
2393 spin_unlock(chip->mutex);
2394 }
2395
2396 return 0;
2397}
2398
2399static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2400 void *v)
2401{
2402 struct mtd_info *mtd;
2403
2404 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2405 cfi_intelext_reset(mtd);
2406 return NOTIFY_DONE;
2407}
2408
2409	static void cfi_intelext_destroy(struct mtd_info *mtd)
2410{
2411 struct map_info *map = mtd->priv;
2412 struct cfi_private *cfi = map->fldrv_priv;
2413	cfi_intelext_reset(mtd);
2414	unregister_reboot_notifier(&mtd->reboot_notifier);
2415	kfree(cfi->cmdset_priv);
2416 kfree(cfi->cfiq);
2417 kfree(cfi->chips[0].priv);
2418 kfree(cfi);
2419 kfree(mtd->eraseregions);
2420}
2421
2422	static char im_name_0001[] = "cfi_cmdset_0001";
2423	static char im_name_0003[] = "cfi_cmdset_0003";
2424	static char im_name_0200[] = "cfi_cmdset_0200";
2425
2426static int __init cfi_intelext_init(void)
2427{
2428	inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
2429	inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
2430	inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
2431	return 0;
2432}
2433
2434static void __exit cfi_intelext_exit(void)
2435{
2436	inter_module_unregister(im_name_0001);
2437	inter_module_unregister(im_name_0003);
2438	inter_module_unregister(im_name_0200);
2439	}
2440
2441module_init(cfi_intelext_init);
2442module_exit(cfi_intelext_exit);
2443
2444MODULE_LICENSE("GPL");
2445MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2446MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");