blob: 23028b384418e14e0f84e9ca46180ecdf1141207 [file] [log] [blame]
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
 *
 */

16#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/mtd/mtd.h>
19#include <linux/crc32.h>
20#include <linux/mtd/nand.h>
Tim Schmielau4e57b682005-10-30 15:03:48 -080021#include <linux/jiffies.h>
Al Viro914e2632006-10-18 13:55:46 -040022#include <linux/sched.h>
Tim Schmielau4e57b682005-10-30 15:03:48 -080023
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include "nodelist.h"
25
26/* For testing write failures */
27#undef BREAKME
28#undef BREAKMEHEADER
29
30#ifdef BREAKME
31static unsigned char *brokenbuf;
32#endif
33
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +010034#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
35#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
36
Linus Torvalds1da177e2005-04-16 15:20:36 -070037/* max. erase failures before we mark a block bad */
38#define MAX_ERASE_FAILURES 2
39
/* Entry in the singly-linked list (c->wbuf_inodes) of inodes which have
   data pending in the write-buffer. */
struct jffs2_inodirty {
	uint32_t ino;			/* inode number of the dirty inode */
	struct jffs2_inodirty *next;	/* next entry, NULL-terminated */
};

/* Sentinel: c->wbuf_inodes is pointed here after an allocation failure,
   meaning "consider _every_ inode dirty".  Never kfree()d. */
static struct jffs2_inodirty inodirty_nomem;
46
47static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
48{
49 struct jffs2_inodirty *this = c->wbuf_inodes;
50
51 /* If a malloc failed, consider _everything_ dirty */
52 if (this == &inodirty_nomem)
53 return 1;
54
55 /* If ino == 0, _any_ non-GC writes mean 'yes' */
56 if (this && !ino)
57 return 1;
58
59 /* Look to see if the inode in question is pending in the wbuf */
60 while (this) {
61 if (this->ino == ino)
62 return 1;
63 this = this->next;
64 }
65 return 0;
66}
67
68static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
69{
70 struct jffs2_inodirty *this;
71
72 this = c->wbuf_inodes;
73
74 if (this != &inodirty_nomem) {
75 while (this) {
76 struct jffs2_inodirty *next = this->next;
77 kfree(this);
78 this = next;
79 }
80 }
81 c->wbuf_inodes = NULL;
82}
83
84static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
85{
86 struct jffs2_inodirty *new;
87
88 /* Mark the superblock dirty so that kupdated will flush... */
Artem B. Bityuckiy4d952702005-03-18 09:58:09 +000089 jffs2_erase_pending_trigger(c);
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
91 if (jffs2_wbuf_pending_for_ino(c, ino))
92 return;
93
94 new = kmalloc(sizeof(*new), GFP_KERNEL);
95 if (!new) {
96 D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
97 jffs2_clear_wbuf_ino_list(c);
98 c->wbuf_inodes = &inodirty_nomem;
99 return;
100 }
101 new->ino = ino;
102 new->next = c->wbuf_inodes;
103 c->wbuf_inodes = new;
104 return;
105}
106
/* Move eraseblocks that were waiting only for a write-buffer flush off
 * the erasable_pending_wbuf_list.  Most go straight to the
 * erase_pending_list; roughly 1 time in 128 a block is parked on the
 * erasable_list instead so blocks are not immediately reused and the
 * load is spread.
 * NOTE(review): callers in this file invoke this under
 * c->erase_completion_lock (see __jffs2_flush_wbuf) -- confirm that is
 * a hard requirement before calling from elsewhere.
 */
static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;	/* mixed into jiffies so the 1-in-128 choice drifts */

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
		list_del(this);
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}
135
Estelle Hammache7f716cf2005-01-24 21:24:18 +0000136#define REFILE_NOTEMPTY 0
137#define REFILE_ANYWAY 1
138
/* Refile an eraseblock on which a write failed.
 * Blocks that already contain nodes go to the bad_used_list; an empty
 * block goes to the erase_pending_list, which is a BUG unless the caller
 * passed allow_empty == REFILE_ANYWAY.  The block's remaining free space
 * is converted to wasted space so the accounting stays consistent.
 * NOTE(review): callers in this file hold c->erase_completion_lock around
 * this call (see jffs2_wbuf_recover) -- confirm before reusing elsewhere.
 */
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	D1(printk("About to refile bad block at %08x\n", jeb->offset));

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}

	/* Swallow the block's remaining free space as one obsolete ref,
	   then reclassify that space from dirty to wasted. */
	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		jffs2_link_node_ref(c, jeb,
				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* convert to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}
177
/* Locate the in-core pointer that references the raw node 'raw' of inode
 * 'f', so write-buffer recovery can redirect it at the replacement ref.
 * 'node' points at the node's image (read back into a buffer, or still in
 * the wbuf) and is used only to determine the node type and data offset.
 * Returns the address of the ->raw pointer to adjust, or NULL when no
 * in-core structure tracks this node type.  'c' is currently unused.
 */
static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	/* 0x1985 is the JFFS2 magic; zero can appear in padding/cleanmarker
	   data.  Anything else means we're looking at garbage. */
	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		/* The metadata node is tracked separately from the fragtree */
		if (f->metadata && f->metadata->raw == raw) {
			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
			return &f->metadata->raw;
		}
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;

	case JFFS2_NODETYPE_DIRENT:
		/* Linear search of the inode's directory entries */
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		BUG();

	default:
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}
224
Linus Torvalds1da177e2005-04-16 15:20:36 -0700225/* Recover from failure to write wbuf. Recover the nodes up to the
226 * wbuf, not the one which we were starting to try to write. */
227
228static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
229{
230 struct jffs2_eraseblock *jeb, *new_jeb;
David Woodhouse9bfeb692006-05-26 21:19:05 +0100231 struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232 size_t retlen;
233 int ret;
David Woodhouse9bfeb692006-05-26 21:19:05 +0100234 int nr_refile = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235 unsigned char *buf;
236 uint32_t start, end, ofs, len;
237
David Woodhouse046b8b92006-05-25 01:50:35 +0100238 jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
239
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240 spin_lock(&c->erase_completion_lock);
Vitaly Wool180bfb32007-03-06 17:01:04 +0300241 if (c->wbuf_ofs % c->mtd->erasesize)
242 jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
243 else
244 jffs2_block_refile(c, jeb, REFILE_ANYWAY);
David Woodhouse9bfeb692006-05-26 21:19:05 +0100245 spin_unlock(&c->erase_completion_lock);
246
247 BUG_ON(!ref_obsolete(jeb->last_node));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248
249 /* Find the first node to be recovered, by skipping over every
250 node which ends before the wbuf starts, or which is obsolete. */
David Woodhouse9bfeb692006-05-26 21:19:05 +0100251 for (next = raw = jeb->first_node; next; raw = next) {
252 next = ref_next(raw);
253
254 if (ref_obsolete(raw) ||
255 (next && ref_offset(next) <= c->wbuf_ofs)) {
256 dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
257 ref_offset(raw), ref_flags(raw),
258 (ref_offset(raw) + ref_totlen(c, jeb, raw)),
259 c->wbuf_ofs);
260 continue;
261 }
262 dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
263 ref_offset(raw), ref_flags(raw),
264 (ref_offset(raw) + ref_totlen(c, jeb, raw)));
265
266 first_raw = raw;
267 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268 }
269
David Woodhouse9bfeb692006-05-26 21:19:05 +0100270 if (!first_raw) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271 /* All nodes were obsolete. Nothing to recover. */
272 D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
David Woodhouse9bfeb692006-05-26 21:19:05 +0100273 c->wbuf_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274 return;
275 }
276
David Woodhouse9bfeb692006-05-26 21:19:05 +0100277 start = ref_offset(first_raw);
278 end = ref_offset(jeb->last_node);
279 nr_refile = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280
David Woodhouse9bfeb692006-05-26 21:19:05 +0100281 /* Count the number of refs which need to be copied */
282 while ((raw = ref_next(raw)) != jeb->last_node)
283 nr_refile++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284
David Woodhouse9bfeb692006-05-26 21:19:05 +0100285 dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
286 start, end, end - start, nr_refile);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287
288 buf = NULL;
289 if (start < c->wbuf_ofs) {
290 /* First affected node was already partially written.
291 * Attempt to reread the old data into our buffer. */
292
293 buf = kmalloc(end - start, GFP_KERNEL);
294 if (!buf) {
295 printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
296
297 goto read_failed;
298 }
299
300 /* Do the read... */
Thomas Gleixner9223a452006-05-23 17:21:03 +0200301 ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000302
Thomas Gleixner9a1fcdf2006-05-29 14:56:39 +0200303 /* ECC recovered ? */
304 if ((ret == -EUCLEAN || ret == -EBADMSG) &&
305 (retlen == c->wbuf_ofs - start))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306 ret = 0;
Thomas Gleixner9a1fcdf2006-05-29 14:56:39 +0200307
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308 if (ret || retlen != c->wbuf_ofs - start) {
309 printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
310
311 kfree(buf);
312 buf = NULL;
313 read_failed:
David Woodhouse9bfeb692006-05-26 21:19:05 +0100314 first_raw = ref_next(first_raw);
315 nr_refile--;
316 while (first_raw && ref_obsolete(first_raw)) {
317 first_raw = ref_next(first_raw);
318 nr_refile--;
319 }
320
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321 /* If this was the only node to be recovered, give up */
David Woodhouse9bfeb692006-05-26 21:19:05 +0100322 if (!first_raw) {
323 c->wbuf_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324 return;
David Woodhouse9bfeb692006-05-26 21:19:05 +0100325 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326
327 /* It wasn't. Go on and try to recover nodes complete in the wbuf */
David Woodhouse9bfeb692006-05-26 21:19:05 +0100328 start = ref_offset(first_raw);
329 dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
330 start, end, end - start, nr_refile);
331
Linus Torvalds1da177e2005-04-16 15:20:36 -0700332 } else {
333 /* Read succeeded. Copy the remaining data from the wbuf */
334 memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
335 }
336 }
337 /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
338 Either 'buf' contains the data, or we find it in the wbuf */
339
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340 /* ... and get an allocation of space from a shiny new block instead */
David Woodhouse9fe48542006-05-23 00:38:06 +0100341 ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342 if (ret) {
343 printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
Estelle Hammache9b88f472005-01-28 18:53:05 +0000344 kfree(buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345 return;
346 }
David Woodhouse9bfeb692006-05-26 21:19:05 +0100347
348 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
349 if (ret) {
350 printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
351 kfree(buf);
352 return;
353 }
354
David Woodhouse9fe48542006-05-23 00:38:06 +0100355 ofs = write_ofs(c);
356
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357 if (end-start >= c->wbuf_pagesize) {
Estelle Hammache7f716cf2005-01-24 21:24:18 +0000358 /* Need to do another write immediately, but it's possible
Estelle Hammache9b88f472005-01-28 18:53:05 +0000359 that this is just because the wbuf itself is completely
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000360 full, and there's nothing earlier read back from the
361 flash. Hence 'buf' isn't necessarily what we're writing
Estelle Hammache9b88f472005-01-28 18:53:05 +0000362 from. */
Estelle Hammache7f716cf2005-01-24 21:24:18 +0000363 unsigned char *rewrite_buf = buf?:c->wbuf;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700364 uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
365
366 D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
367 towrite, ofs));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000368
Linus Torvalds1da177e2005-04-16 15:20:36 -0700369#ifdef BREAKMEHEADER
370 static int breakme;
371 if (breakme++ == 20) {
372 printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
373 breakme = 0;
Thomas Gleixner9223a452006-05-23 17:21:03 +0200374 c->mtd->write(c->mtd, ofs, towrite, &retlen,
375 brokenbuf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700376 ret = -EIO;
377 } else
378#endif
Thomas Gleixner9223a452006-05-23 17:21:03 +0200379 ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
380 rewrite_buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381
382 if (ret || retlen != towrite) {
383 /* Argh. We tried. Really we did. */
384 printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
Estelle Hammache9b88f472005-01-28 18:53:05 +0000385 kfree(buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386
David Woodhouse2f785402006-05-24 02:04:45 +0100387 if (retlen)
David Woodhouse9bfeb692006-05-26 21:19:05 +0100388 jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389
Linus Torvalds1da177e2005-04-16 15:20:36 -0700390 return;
391 }
392 printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
393
394 c->wbuf_len = (end - start) - towrite;
395 c->wbuf_ofs = ofs + towrite;
Estelle Hammache7f716cf2005-01-24 21:24:18 +0000396 memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397 /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700398 } else {
399 /* OK, now we're left with the dregs in whichever buffer we're using */
400 if (buf) {
401 memcpy(c->wbuf, buf, end-start);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402 } else {
403 memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
404 }
405 c->wbuf_ofs = ofs;
406 c->wbuf_len = end - start;
407 }
408
409 /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
410 new_jeb = &c->blocks[ofs / c->sector_size];
411
412 spin_lock(&c->erase_completion_lock);
David Woodhouse9bfeb692006-05-26 21:19:05 +0100413 for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
414 uint32_t rawlen = ref_totlen(c, jeb, raw);
415 struct jffs2_inode_cache *ic;
416 struct jffs2_raw_node_ref *new_ref;
417 struct jffs2_raw_node_ref **adjust_ref = NULL;
418 struct jffs2_inode_info *f = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419
420 D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
David Woodhouse9bfeb692006-05-26 21:19:05 +0100421 rawlen, ref_offset(raw), ref_flags(raw), ofs));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422
David Woodhouse9bfeb692006-05-26 21:19:05 +0100423 ic = jffs2_raw_ref_to_ic(raw);
424
425 /* Ick. This XATTR mess should be fixed shortly... */
426 if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
427 struct jffs2_xattr_datum *xd = (void *)ic;
428 BUG_ON(xd->node != raw);
429 adjust_ref = &xd->node;
430 raw->next_in_ino = NULL;
431 ic = NULL;
432 } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
433 struct jffs2_xattr_datum *xr = (void *)ic;
434 BUG_ON(xr->node != raw);
435 adjust_ref = &xr->node;
436 raw->next_in_ino = NULL;
437 ic = NULL;
438 } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
439 struct jffs2_raw_node_ref **p = &ic->nodes;
440
441 /* Remove the old node from the per-inode list */
442 while (*p && *p != (void *)ic) {
443 if (*p == raw) {
444 (*p) = (raw->next_in_ino);
445 raw->next_in_ino = NULL;
446 break;
447 }
448 p = &((*p)->next_in_ino);
449 }
450
451 if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
452 /* If it's an in-core inode, then we have to adjust any
453 full_dirent or full_dnode structure to point to the
454 new version instead of the old */
455 f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink);
456 if (IS_ERR(f)) {
457 /* Should never happen; it _must_ be present */
458 JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
459 ic->ino, PTR_ERR(f));
460 BUG();
461 }
462 /* We don't lock f->sem. There's a number of ways we could
463 end up in here with it already being locked, and nobody's
464 going to modify it on us anyway because we hold the
465 alloc_sem. We're only changing one ->raw pointer too,
466 which we can get away with without upsetting readers. */
467 adjust_ref = jffs2_incore_replace_raw(c, f, raw,
468 (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
469 } else if (unlikely(ic->state != INO_STATE_PRESENT &&
470 ic->state != INO_STATE_CHECKEDABSENT &&
471 ic->state != INO_STATE_GC)) {
472 JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
473 BUG();
474 }
475 }
476
477 new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
478
479 if (adjust_ref) {
480 BUG_ON(*adjust_ref != raw);
481 *adjust_ref = new_ref;
482 }
483 if (f)
484 jffs2_gc_release_inode(c, f);
485
486 if (!ref_obsolete(raw)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700487 jeb->dirty_size += rawlen;
488 jeb->used_size -= rawlen;
489 c->dirty_size += rawlen;
David Woodhouse9bfeb692006-05-26 21:19:05 +0100490 c->used_size -= rawlen;
491 raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
492 BUG_ON(raw->next_in_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494 ofs += rawlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495 }
496
David Woodhouse9bfeb692006-05-26 21:19:05 +0100497 kfree(buf);
498
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499 /* Fix up the original jeb now it's on the bad_list */
David Woodhouse9bfeb692006-05-26 21:19:05 +0100500 if (first_raw == jeb->first_node) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
Akinobu Mitaf1166292006-06-26 00:24:46 -0700502 list_move(&jeb->list, &c->erase_pending_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503 c->nr_erasing_blocks++;
504 jffs2_erase_pending_trigger(c);
505 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +0100507 jffs2_dbg_acct_sanity_check_nolock(c, jeb);
David Woodhouse9bfeb692006-05-26 21:19:05 +0100508 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700509
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +0100510 jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
David Woodhouse9bfeb692006-05-26 21:19:05 +0100511 jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512
513 spin_unlock(&c->erase_completion_lock);
514
David Woodhouse9bfeb692006-05-26 21:19:05 +0100515 D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));
516
Linus Torvalds1da177e2005-04-16 15:20:36 -0700517}
518
519/* Meaning of pad argument:
520 0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
521 1: Pad, do not adjust nextblock free_size
522 2: Pad, adjust nextblock free_size
523*/
524#define NOPAD 0
525#define PAD_NOACCOUNT 1
526#define PAD_ACCOUNTING 2
527
/* Flush the write-buffer out to flash, optionally padding its unused
 * tail to a full page.
 *
 * pad: NOPAD          -- write as-is (jffs2_flash_writev filled the page)
 *      PAD_NOACCOUNT  -- pad, do not adjust the block's free_size
 *      PAD_ACCOUNTING -- pad and account the padding as wasted space
 *
 * Returns 0 on success, -ENOMEM on ref preallocation failure, or the
 * write error after invoking jffs2_wbuf_recover().  The caller MUST
 * hold c->alloc_sem (enforced with down_trylock below).
 */
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	/* Sanity check: if we can take alloc_sem the caller didn't hold it */
	if (!down_trylock(&c->alloc_sem)) {
		up(&c->alloc_sem);
		printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* claim remaining space on the page
	   this happens, if we have a change to a new block,
	   or if fsync forces us to flush the writebuffer.
	   if we have a switch to next page, we will not have
	   enough remaining space for this.
	*/
	if (pad ) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		/* If there's room for a node header, write a PADDING node so
		   the scan code skips the tail cleanly on next mount. */
		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			      brokenbuf);
		ret = -EIO;
	} else
#endif

		ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);

	if (ret || retlen != c->wbuf_pagesize) {
		if (ret)
			printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
		else {
			printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			       retlen, c->wbuf_pagesize);
			ret = -EIO;
		}

		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (wbuf_jeb->free_size < waste) {
			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
			       c->wbuf_ofs, c->wbuf_len, waste);
			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
			       wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* FIXME: that made it count as dirty. Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	memset(c->wbuf,0xff,c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}
643
/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding.
   Returns 0 if nothing was pending, otherwise the result of the flush. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));

	/* No write-buffer allocated: nothing can be pending */
	if (!c->wbuf)
		return 0;

	down(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
		up(&c->alloc_sem);
		return 0;
	}

	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {
		/* Keep running GC passes until one of them moves or empties
		   the wbuf (its offset changes), dropping alloc_sem around
		   each pass so GC can take it. */

		up(&c->alloc_sem);

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			down(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		down(&c->alloc_sem);
	}

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));

	up(&c->alloc_sem);
	return ret;
}
707
708/* Pad write-buffer to end and write it, wasting space. */
709int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
710{
711 int ret;
712
David Woodhouse8aee6ac2005-02-02 22:12:08 +0000713 if (!c->wbuf)
714 return 0;
715
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716 down_write(&c->wbuf_sem);
717 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
Estelle Hammache7f716cf2005-01-24 21:24:18 +0000718 /* retry - maybe wbuf recover left some data in wbuf. */
719 if (ret)
720 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700721 up_write(&c->wbuf_sem);
722
723 return ret;
724}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700725
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200726static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
727 size_t len)
728{
729 if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
730 return 0;
731
732 if (len > (c->wbuf_pagesize - c->wbuf_len))
733 len = c->wbuf_pagesize - c->wbuf_len;
734 memcpy(c->wbuf + c->wbuf_len, buf, len);
735 c->wbuf_len += (uint32_t) len;
736 return len;
737}
738
739int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
740 unsigned long count, loff_t to, size_t *retlen,
741 uint32_t ino)
742{
743 struct jffs2_eraseblock *jeb;
744 size_t wbuf_retlen, donelen = 0;
745 uint32_t outvec_to = to;
746 int ret, invec;
747
748 /* If not writebuffered flash, don't bother */
Andrew Victor3be36672005-02-09 09:09:05 +0000749 if (!jffs2_is_writebuffered(c))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000751
Linus Torvalds1da177e2005-04-16 15:20:36 -0700752 down_write(&c->wbuf_sem);
753
754 /* If wbuf_ofs is not initialized, set it to target address */
755 if (c->wbuf_ofs == 0xFFFFFFFF) {
756 c->wbuf_ofs = PAGE_DIV(to);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000757 c->wbuf_len = PAGE_MOD(to);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700758 memset(c->wbuf,0xff,c->wbuf_pagesize);
759 }
760
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200761 /*
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200762 * Sanity checks on target address. It's permitted to write
763 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
764 * write at the beginning of a new erase block. Anything else,
765 * and you die. New block starts at xxx000c (0-b = block
766 * header)
767 */
Andrew Victor3be36672005-02-09 09:09:05 +0000768 if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700769 /* It's a write to a new block */
770 if (c->wbuf_len) {
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200771 D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
772 "causes flush of wbuf at 0x%08x\n",
773 (unsigned long)to, c->wbuf_ofs));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200775 if (ret)
776 goto outerr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 }
778 /* set pointer to new block */
779 c->wbuf_ofs = PAGE_DIV(to);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000780 c->wbuf_len = PAGE_MOD(to);
781 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700782
783 if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
784 /* We're not writing immediately after the writebuffer. Bad. */
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200785 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
786 "to %08lx\n", (unsigned long)to);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700787 if (c->wbuf_len)
788 printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200789 c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700790 BUG();
791 }
792
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200793 /* adjust alignment offset */
794 if (c->wbuf_len != PAGE_MOD(to)) {
795 c->wbuf_len = PAGE_MOD(to);
796 /* take care of alignment to next page */
797 if (!c->wbuf_len) {
798 c->wbuf_len = c->wbuf_pagesize;
799 ret = __jffs2_flush_wbuf(c, NOPAD);
800 if (ret)
801 goto outerr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700802 }
803 }
804
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200805 for (invec = 0; invec < count; invec++) {
806 int vlen = invecs[invec].iov_len;
807 uint8_t *v = invecs[invec].iov_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700808
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200809 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200811 if (c->wbuf_len == c->wbuf_pagesize) {
812 ret = __jffs2_flush_wbuf(c, NOPAD);
813 if (ret)
814 goto outerr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700815 }
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200816 vlen -= wbuf_retlen;
817 outvec_to += wbuf_retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700818 donelen += wbuf_retlen;
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200819 v += wbuf_retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200821 if (vlen >= c->wbuf_pagesize) {
822 ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
823 &wbuf_retlen, v);
824 if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
825 goto outfile;
826
827 vlen -= wbuf_retlen;
828 outvec_to += wbuf_retlen;
829 c->wbuf_ofs = outvec_to;
830 donelen += wbuf_retlen;
831 v += wbuf_retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832 }
833
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200834 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
835 if (c->wbuf_len == c->wbuf_pagesize) {
836 ret = __jffs2_flush_wbuf(c, NOPAD);
837 if (ret)
838 goto outerr;
839 }
840
841 outvec_to += wbuf_retlen;
842 donelen += wbuf_retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843 }
844
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200845 /*
846 * If there's a remainder in the wbuf and it's a non-GC write,
847 * remember that the wbuf affects this ino
848 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 *retlen = donelen;
850
Ferenc Havasie631ddb2005-09-07 09:35:26 +0100851 if (jffs2_sum_active()) {
852 int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
853 if (res)
854 return res;
855 }
856
Linus Torvalds1da177e2005-04-16 15:20:36 -0700857 if (c->wbuf_len && ino)
858 jffs2_wbuf_dirties_inode(c, ino);
859
860 ret = 0;
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200861 up_write(&c->wbuf_sem);
862 return ret;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000863
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200864outfile:
865 /*
866 * At this point we have no problem, c->wbuf is empty. However
867 * refile nextblock to avoid writing again to same address.
868 */
869
870 spin_lock(&c->erase_completion_lock);
871
872 jeb = &c->blocks[outvec_to / c->sector_size];
873 jffs2_block_refile(c, jeb, REFILE_ANYWAY);
874
875 spin_unlock(&c->erase_completion_lock);
876
877outerr:
878 *retlen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879 up_write(&c->wbuf_sem);
880 return ret;
881}
882
883/*
884 * This is the entry for flash write.
885 * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev
886*/
David Woodhouse9bfeb692006-05-26 21:19:05 +0100887int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
888 size_t *retlen, const u_char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889{
890 struct kvec vecs[1];
891
Andrew Victor3be36672005-02-09 09:09:05 +0000892 if (!jffs2_is_writebuffered(c))
Ferenc Havasie631ddb2005-09-07 09:35:26 +0100893 return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700894
895 vecs[0].iov_base = (unsigned char *) buf;
896 vecs[0].iov_len = len;
897 return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
898}
899
/*
 * Handle readback from writebuffer and ECC failure return.
 *
 * Reads @len bytes at @ofs from the MTD device into @buf, then patches
 * in any bytes that are still sitting in the write buffer and overlap
 * the requested range (reads under c->wbuf_sem, read side).
 * An ECC error with a complete read is deliberately treated as success
 * -- see the comment below.
 */
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t	orbf = 0, owbf = 0, lwbf = 0;
	int	ret;

	/* No write buffer in use: plain MTD read */
	if (!jffs2_is_writebuffered(c))
		return c->mtd->read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
		if (ret == -EBADMSG)
			printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)"
			       " returned ECC error\n", len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer,
		 * maybe we are lucky and all data or parts are correct. We
		 * check the node. If data are corrupted node check will sort
		 * it out. We keep this block, it will fail on write or erase
		 * and the we mark it bad. Or should we do that now? But we
		 * should give him a chance. Maybe we had a system crash or
		 * power loss before the ecc write or a erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	/* Overlay the buffered-but-unflushed bytes onto the read result.
	 * Two cases: the read starts inside the wbuf range, or the wbuf
	 * range starts inside the read. */
	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf+orbf,c->wbuf+owbf,lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}
962
/* Number of NAND pages whose OOB area is fetched in one read_oob call
 * when scanning an eraseblock (see jffs2_check_oob_empty()) */
#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 12 bytes for OOB clean marker */
#define OOB_CM_SIZE 12

/* Cleanmarker node image compared against / written to the out-of-band
 * area of the first page of an erased block */
static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = cpu_to_je32(8)
};
Thomas Gleixner8593fbc2006-05-29 03:26:58 +0200974
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975/*
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +0200976 * Check, if the out of band area is empty. This function knows about the clean
977 * marker and if it is present in OOB, treats the OOB as empty anyway.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978 */
Thomas Gleixner8593fbc2006-05-29 03:26:58 +0200979int jffs2_check_oob_empty(struct jffs2_sb_info *c,
980 struct jffs2_eraseblock *jeb, int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981{
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +0200982 int i, ret;
983 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
Thomas Gleixner8593fbc2006-05-29 03:26:58 +0200984 struct mtd_oob_ops ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +0200986 ops.mode = MTD_OOB_AUTO;
987 ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +0200988 ops.oobbuf = c->oobbuf;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +0200989 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +0200990 ops.datbuf = NULL;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +0200991
992 ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +0200993 if (ret || ops.oobretlen != ops.ooblen) {
Andrew Morton7be26bf2007-02-17 16:02:10 -0800994 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
995 " bytes, read %zd bytes, error %d\n",
996 jeb->offset, ops.ooblen, ops.oobretlen, ret);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +0200997 if (!ret)
998 ret = -EIO;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +0200999 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001001
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001002 for(i = 0; i < ops.ooblen; i++) {
1003 if (mode && i < cmlen)
1004 /* Yeah, we know about the cleanmarker */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005 continue;
1006
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001007 if (ops.oobbuf[i] != 0xFF) {
1008 D2(printk(KERN_DEBUG "Found %02x at %x in OOB for "
1009 "%08x\n", ops.oobbuf[i], i, jeb->offset));
1010 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011 }
1012 }
1013
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001014 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015}
1016
1017/*
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001018 * Check for a valid cleanmarker.
1019 * Returns: 0 if a valid cleanmarker was found
1020 * 1 if no cleanmarker was found
1021 * negative error code if an error occurred
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001022 */
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001023int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
1024 struct jffs2_eraseblock *jeb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001025{
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001026 struct mtd_oob_ops ops;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001027 int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001029 ops.mode = MTD_OOB_AUTO;
1030 ops.ooblen = cmlen;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001031 ops.oobbuf = c->oobbuf;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001032 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001033 ops.datbuf = NULL;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001034
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001035 ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
1036 if (ret || ops.oobretlen != ops.ooblen) {
Andrew Morton7be26bf2007-02-17 16:02:10 -08001037 printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
1038 " bytes, read %zd bytes, error %d\n",
1039 jeb->offset, ops.ooblen, ops.oobretlen, ret);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001040 if (!ret)
1041 ret = -EIO;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001042 return ret;
1043 }
1044
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001045 return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046}
1047
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001048int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1049 struct jffs2_eraseblock *jeb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050{
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001051 int ret;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001052 struct mtd_oob_ops ops;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001053 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001055 ops.mode = MTD_OOB_AUTO;
1056 ops.ooblen = cmlen;
1057 ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1058 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001059 ops.datbuf = NULL;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001060
1061 ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001062 if (ret || ops.oobretlen != ops.ooblen) {
Andrew Morton7be26bf2007-02-17 16:02:10 -08001063 printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd"
1064 " bytes, read %zd bytes, error %d\n",
1065 jeb->offset, ops.ooblen, ops.oobretlen, ret);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001066 if (!ret)
1067 ret = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068 return ret;
1069 }
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001070
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 return 0;
1072}
1073
/*
 * On NAND we try to mark this block bad. If the block was erased more
 * than MAX_ERASE_FAILURES times we mark it finally bad.
 * Don't care about failures. This block remains on the erase-pending
 * or badblock list as long as nobody manipulates the flash with
 * a bootloader or something like that.
 *
 * Returns: 0 if the erase-failure count is still below the limit
 *	    1 if the block was marked bad (or no markbad op exists)
 *	    negative error code if marking the block bad failed
 */

int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	int ret;

	/* if the count is < max, we try to write the counter to the 2nd page oob area */
	if( ++jeb->bad_count < MAX_ERASE_FAILURES)
		return 0;

	/* No way to mark the block bad on this device */
	if (!c->mtd->block_markbad)
		return 1; // What else can we do?

	D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
	ret = c->mtd->block_markbad(c->mtd, bad_offset);

	if (ret) {
		D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
		return ret;
	}
	return 1;
}
1102
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001103int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001104{
Thomas Gleixner5bd34c02006-05-27 22:16:10 +02001105 struct nand_ecclayout *oinfo = c->mtd->ecclayout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106
Linus Torvalds1da177e2005-04-16 15:20:36 -07001107 if (!c->mtd->oobsize)
1108 return 0;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001109
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110 /* Cleanmarker is out-of-band, so inline size zero */
1111 c->cleanmarker_size = 0;
1112
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001113 if (!oinfo || oinfo->oobavail == 0) {
1114 printk(KERN_ERR "inconsistent device description\n");
Thomas Gleixner5bd34c02006-05-27 22:16:10 +02001115 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116 }
Thomas Gleixner5bd34c02006-05-27 22:16:10 +02001117
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001118 D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n"));
Thomas Gleixner5bd34c02006-05-27 22:16:10 +02001119
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001120 c->oobavail = oinfo->oobavail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121
1122 /* Initialise write buffer */
1123 init_rwsem(&c->wbuf_sem);
Joern Engel28318772006-05-22 23:18:05 +02001124 c->wbuf_pagesize = c->mtd->writesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125 c->wbuf_ofs = 0xFFFFFFFF;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001126
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1128 if (!c->wbuf)
1129 return -ENOMEM;
1130
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001131 c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
1132 if (!c->oobbuf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 kfree(c->wbuf);
1134 return -ENOMEM;
1135 }
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001136
1137 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138}
1139
1140void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1141{
1142 kfree(c->wbuf);
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001143 kfree(c->oobbuf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144}
1145
Andrew Victor8f15fd52005-02-09 09:17:45 +00001146int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1147 c->cleanmarker_size = 0; /* No cleanmarkers needed */
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001148
Andrew Victor8f15fd52005-02-09 09:17:45 +00001149 /* Initialize write buffer */
1150 init_rwsem(&c->wbuf_sem);
Andrew Victor8f15fd52005-02-09 09:17:45 +00001151
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001152
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001153 c->wbuf_pagesize = c->mtd->erasesize;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001154
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001155 /* Find a suitable c->sector_size
1156 * - Not too much sectors
1157 * - Sectors have to be at least 4 K + some bytes
1158 * - All known dataflashes have erase sizes of 528 or 1056
1159 * - we take at least 8 eraseblocks and want to have at least 8K size
1160 * - The concatenation should be a power of 2
1161 */
Andrew Victor8f15fd52005-02-09 09:17:45 +00001162
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001163 c->sector_size = 8 * c->mtd->erasesize;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001164
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001165 while (c->sector_size < 8192) {
1166 c->sector_size *= 2;
1167 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001168
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001169 /* It may be necessary to adjust the flash size */
1170 c->flash_size = c->mtd->size;
1171
1172 if ((c->flash_size % c->sector_size) != 0) {
1173 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1174 printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
1175 };
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001176
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001177 c->wbuf_ofs = 0xFFFFFFFF;
Andrew Victor8f15fd52005-02-09 09:17:45 +00001178 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1179 if (!c->wbuf)
1180 return -ENOMEM;
1181
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001182 printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
Andrew Victor8f15fd52005-02-09 09:17:45 +00001183
1184 return 0;
1185}
1186
1187void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1188 kfree(c->wbuf);
1189}
Andrew Victor8f15fd52005-02-09 09:17:45 +00001190
Nicolas Pitre59da7212005-08-06 05:51:33 +01001191int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
Joern Engelc8b229d2006-05-22 23:18:12 +02001192 /* Cleanmarker currently occupies whole programming regions,
1193 * either one or 2 for 8Byte STMicro flashes. */
1194 c->cleanmarker_size = max(16u, c->mtd->writesize);
Nicolas Pitre59da7212005-08-06 05:51:33 +01001195
1196 /* Initialize write buffer */
1197 init_rwsem(&c->wbuf_sem);
Joern Engel28318772006-05-22 23:18:05 +02001198 c->wbuf_pagesize = c->mtd->writesize;
Nicolas Pitre59da7212005-08-06 05:51:33 +01001199 c->wbuf_ofs = 0xFFFFFFFF;
1200
1201 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1202 if (!c->wbuf)
1203 return -ENOMEM;
1204
1205 return 0;
1206}
1207
1208void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1209 kfree(c->wbuf);
1210}