
/* Common Flash Interface structures
 * See http://support.intel.com/design/flash/technote/index.htm
 * $Id: cfi.h,v 1.51 2005/02/05 02:06:16 nico Exp $
 */

#ifndef __MTD_CFI_H__
#define __MTD_CFI_H__

#include <linux/config.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>

#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
#define cfi_interleave_is_1(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I2
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 2
# endif
#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
#define cfi_interleave_is_2(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I4
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 4
# endif
#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
#define cfi_interleave_is_4(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I8
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 8
# endif
#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
#define cfi_interleave_is_8(cfi) (0)
#endif

static inline int cfi_interleave_supported(int i)
{
	switch (i) {
#ifdef CONFIG_MTD_CFI_I1
	case 1:
#endif
#ifdef CONFIG_MTD_CFI_I2
	case 2:
#endif
#ifdef CONFIG_MTD_CFI_I4
	case 4:
#endif
#ifdef CONFIG_MTD_CFI_I8
	case 8:
#endif
		return 1;

	default:
		return 0;
	}
}


/* NB: these values must represent the number of bytes needed to meet the
 * device type (x8, x16, x32). E.g. a 32-bit (x32) device is 4 bytes wide.
 * These numbers are used in calculations.
 */
#define CFI_DEVICETYPE_X8  (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)
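
/* Illustrative sketch (not part of the original header): the device type
 * value is simply the chip's data-bus width in bytes, so it scales CFI
 * "word" offsets into byte offsets. For a hypothetical x16 part:
 *
 *	int type = CFI_DEVICETYPE_X16;		// == 2 bytes per device word
 *	uint32_t byte_ofs = 0x55 * type;	// CFI offset 0x55 -> byte 0xAA
 *
 * cfi_build_cmd_addr() below performs exactly this scaling, multiplied by
 * the interleave.
 */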

/* NB: We keep these structures in memory in HOST byteorder, except
 * where individually noted.
 */

/* Basic Query Structure */
struct cfi_ident {
	uint8_t  qry[3];
	uint16_t P_ID;
	uint16_t P_ADR;
	uint16_t A_ID;
	uint16_t A_ADR;
	uint8_t  VccMin;
	uint8_t  VccMax;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  WordWriteTimeoutTyp;
	uint8_t  BufWriteTimeoutTyp;
	uint8_t  BlockEraseTimeoutTyp;
	uint8_t  ChipEraseTimeoutTyp;
	uint8_t  WordWriteTimeoutMax;
	uint8_t  BufWriteTimeoutMax;
	uint8_t  BlockEraseTimeoutMax;
	uint8_t  ChipEraseTimeoutMax;
	uint8_t  DevSize;
	uint16_t InterfaceDesc;
	uint16_t MaxBufWriteSize;
	uint8_t  NumEraseRegions;
	uint32_t EraseRegionInfo[0]; /* Not host ordered */
} __attribute__((packed));

/* Extended Query Structure for both PRI and ALT */

struct cfi_extquery {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
} __attribute__((packed));

/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */

struct cfi_pri_intelext {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
				    block follows - FIXME - not currently supported */
	uint8_t  SuspendCmdSupport;
	uint16_t BlkStatusRegMask;
	uint8_t  VccOptimal;
	uint8_t  VppOptimal;
	uint8_t  NumProtectionFields;
	uint16_t ProtRegAddr;
	uint8_t  FactProtRegSize;
	uint8_t  UserProtRegSize;
	uint8_t  extra[0];
} __attribute__((packed));

struct cfi_intelext_otpinfo {
	uint32_t ProtRegAddr;
	uint16_t FactGroups;
	uint8_t  FactProtRegSize;
	uint16_t UserGroups;
	uint8_t  UserProtRegSize;
} __attribute__((packed));

struct cfi_intelext_blockinfo {
	uint16_t NumIdentBlocks;
	uint16_t BlockSize;
	uint16_t MinBlockEraseCycles;
	uint8_t  BitsPerCell;
	uint8_t  BlockCap;
} __attribute__((packed));

struct cfi_intelext_regioninfo {
	uint16_t NumIdentPartitions;
	uint8_t  NumOpAllowed;
	uint8_t  NumOpAllowedSimProgMode;
	uint8_t  NumOpAllowedSimEraMode;
	uint8_t  NumBlockTypes;
	struct cfi_intelext_blockinfo BlockTypes[1];
} __attribute__((packed));

/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */

struct cfi_pri_amdstd {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint8_t  SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
	uint8_t  EraseSuspend;
	uint8_t  BlkProt;
	uint8_t  TmpBlkUnprotect;
	uint8_t  BlkProtUnprot;
	uint8_t  SimultaneousOps;
	uint8_t  BurstMode;
	uint8_t  PageMode;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  TopBottom;
} __attribute__((packed));

struct cfi_pri_query {
	uint8_t  NumFields;
	uint32_t ProtField[1]; /* Not host ordered */
} __attribute__((packed));

struct cfi_bri_query {
	uint8_t  PageModeReadCap;
	uint8_t  NumFields;
	uint32_t ConfField[1]; /* Not host ordered */
} __attribute__((packed));

#define P_ID_NONE		0x0000
#define P_ID_INTEL_EXT		0x0001
#define P_ID_AMD_STD		0x0002
#define P_ID_INTEL_STD		0x0003
#define P_ID_AMD_EXT		0x0004
#define P_ID_WINBOND		0x0006
#define P_ID_ST_ADV		0x0020
#define P_ID_MITSUBISHI_STD	0x0100
#define P_ID_MITSUBISHI_EXT	0x0101
#define P_ID_SST_PAGE		0x0102
#define P_ID_INTEL_PERFORMANCE	0x0200
#define P_ID_INTEL_DATA		0x0210
#define P_ID_RESERVED		0xffff


#define CFI_MODE_CFI	1
#define CFI_MODE_JEDEC	0

struct cfi_private {
	uint16_t cmdset;
	void *cmdset_priv;
	int interleave;
	int device_type;
	int cfi_mode;		/* Are we a JEDEC device pretending to be CFI? */
	int addr_unlock1;
	int addr_unlock2;
	struct mtd_info *(*cmdset_setup)(struct map_info *);
	struct cfi_ident *cfiq; /* For now only one. We insist that all devs
				   must be of the same type. */
	int mfr, id;
	int numchips;
	unsigned long chipshift; /* Because they're of the same type */
	const char *im_name;	 /* inter_module name for cmdset_setup */
	struct flchip chips[0];  /* per-chip data structure for each chip */
};


/*
 * Returns the command address according to the given geometry.
 */
static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
{
	return (cmd_ofs * type) * interleave;
}
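
/* Worked example (illustrative, not part of the original header): for two
 * interleaved x16 chips (interleave == 2, type == CFI_DEVICETYPE_X16 == 2),
 * the CFI command offset 0x555 lands at byte offset
 *
 *	cfi_build_cmd_addr(0x555, 2, 2) == 0x555 * 2 * 2 == 0x1554
 *
 * i.e. each device still sees the command at its own word address 0x555.
 */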

/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
static inline map_word cfi_build_cmd(u_char cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate.  The cases below
	   deliberately fall through: entry at the largest stride copies
	   the single-chip pattern, and each following case fills the
	   remaining gaps until chips_per_word copies are in place. */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
		/* fall through */
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i=0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
#define CMD(x) cfi_build_cmd((x), map, cfi)
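
/* Usage sketch (illustrative; CMD() expects 'map' and 'cfi' to be in scope,
 * as they are in the chip drivers). Writing a reset/read-array command to a
 * chip might look roughly like:
 *
 *	map_write(map, CMD(0xF0), chip->start);
 *
 * cfi_build_cmd() has already replicated 0xF0 across every interleaved
 * device on the bus, in CFI (chip) byte order.
 */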

/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it is set to the value that was at the command
 * address before the command was written.
 */
static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
				struct map_info *map, struct cfi_private *cfi,
				int type, map_word *prev_val)
{
	map_word val;
	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);

	val = cfi_build_cmd(cmd, map, cfi);

	if (prev_val)
		*prev_val = map_read(map, addr);

	map_write(map, val, addr);

	return addr - base;
}
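
/* Example sketch (based on how the command-set drivers use this helper; the
 * 0xAA/0x55 cycle shown is the standard AMD/Fujitsu unlock sequence, and
 * cfi->addr_unlock1/2 are typically 0x555/0x2AA):
 *
 *	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
 *			 cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
 *			 cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
 *			 cfi->device_type, NULL);	-- autoselect entry
 *
 * Each call scales the unlock address for the device width and interleave
 * and replicates the command byte across all interleaved chips.
 */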

static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
	map_word val = map_read(map, addr);

	if (map_bankwidth_is_1(map)) {
		return val.x[0];
	} else if (map_bankwidth_is_2(map)) {
		return cfi16_to_cpu(val.x[0]);
	} else {
		/* No point in a 64-bit byteswap since that would just be
		   swapping the responses from different chips, and we are
		   only interested in one chip (a representative sample) */
		return cfi32_to_cpu(val.x[0]);
	}
}
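
/* Query-mode sketch (illustrative; mirrors what the CFI probe code does).
 * After the 0x98 "enter query mode" command, the chip should answer
 * 'Q','R','Y' at CFI offsets 0x10-0x12:
 *
 *	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
 *	if (cfi_read_query(map,
 *			   base + cfi_build_cmd_addr(0x10, cfi_interleave(cfi),
 *						     cfi->device_type)) == 'Q') {
 *		-- 'R' at 0x11 and 'Y' at 0x12 would be checked the same way
 *	}
 */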

static inline void cfi_udelay(int us)
{
	if (us >= 1000) {
		msleep((us+999)/1000);
	} else {
		udelay(us);
		cond_resched();
	}
}

static inline void cfi_spin_lock(spinlock_t *mutex)
{
	spin_lock_bh(mutex);
}

static inline void cfi_spin_unlock(spinlock_t *mutex)
{
	spin_unlock_bh(mutex);
}


struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
				  const char* name);
struct cfi_fixup {
	uint16_t mfr;
	uint16_t id;
	void (*fixup)(struct mtd_info *mtd, void* param);
	void* param;
};

#define CFI_MFR_ANY	0xffff
#define CFI_ID_ANY	0xffff

#define CFI_MFR_AMD	0x0001
#define CFI_MFR_ST	0x0020	/* STMicroelectronics */

void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
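
/* Usage sketch (illustrative; 'my_fixup_table' and 'fixup_bootblock' are
 * hypothetical names, but this is the shape the chip drivers use). A table
 * terminated by an entry with a NULL fixup callback maps manufacturer/device
 * IDs to fixups, which cfi_fixup() applies to the probed chip:
 *
 *	static struct cfi_fixup my_fixup_table[] = {
 *		{ CFI_MFR_AMD, CFI_ID_ANY, fixup_bootblock, NULL },
 *		{ CFI_MFR_ANY, CFI_ID_ANY, NULL, NULL }
 *	};
 *
 *	cfi_fixup(mtd, my_fixup_table);
 *
 * An entry matches when its mfr/id equal the probed values or are the
 * CFI_MFR_ANY/CFI_ID_ANY wildcards.
 */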

typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk);
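
/* Sketch of the intended pattern (illustrative; do_lock_oneblock is a
 * hypothetical per-block callback). cfi_varsize_frob() walks every erase
 * block covered by [ofs, ofs+len) and invokes the callback once per block,
 * which is how the chip drivers implement erase/lock/unlock over
 * variable-size erase regions:
 *
 *	static int do_lock_oneblock(struct map_info *map, struct flchip *chip,
 *				    unsigned long adr, int len, void *thunk)
 *	{
 *		-- issue the per-block command at 'adr' here
 *		return 0;
 *	}
 *
 *	ret = cfi_varsize_frob(mtd, do_lock_oneblock, ofs, len, NULL);
 */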


#endif /* __MTD_CFI_H__ */