/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/crc32.h>
#include <linux/mtd/nand.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

#include "nodelist.h"

/* For testing write failures */
#undef BREAKME
#undef BREAKMEHEADER

#ifdef BREAKME
static unsigned char *brokenbuf;
#endif

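/* Round a flash address down to the start of its write-buffer page, or get
   its offset within that page. */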
#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )

/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES	2

struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

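/* Sentinel: c->wbuf_inodes points here when we failed to allocate a real
   list entry, meaning every inode must be treated as having data pending
   in the write buffer. */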
static struct jffs2_inodirty inodirty_nomem;

static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *this = c->wbuf_inodes;

	/* If a malloc failed, consider _everything_ dirty */
	if (this == &inodirty_nomem)
		return 1;

	/* If ino == 0, _any_ non-GC writes mean 'yes' */
	if (this && !ino)
		return 1;

	/* Look to see if the inode in question is pending in the wbuf */
	while (this) {
		if (this->ino == ino)
			return 1;
		this = this->next;
	}
	return 0;
}

static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
{
	struct jffs2_inodirty *this;

	this = c->wbuf_inodes;

	if (this != &inodirty_nomem) {
		while (this) {
			struct jffs2_inodirty *next = this->next;
			kfree(this);
			this = next;
		}
	}
	c->wbuf_inodes = NULL;
}

static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *new;

	/* Mark the superblock dirty so that kupdated will flush... */
	jffs2_erase_pending_trigger(c);

	if (jffs2_wbuf_pending_for_ino(c, ino))
		return;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
		jffs2_clear_wbuf_ino_list(c);
		c->wbuf_inodes = &inodirty_nomem;
		return;
	}
	new->ino = ino;
	new->next = c->wbuf_inodes;
	c->wbuf_inodes = new;
	return;
}

static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
		list_del(this);
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}

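/* 'allow_empty' argument to jffs2_block_refile(): whether it is legitimate
   for the block being refiled to contain no nodes at all. */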
#define REFILE_NOTEMPTY 0
#define REFILE_ANYWAY 1

static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	D1(printk("About to refile bad block at %08x\n", jeb->offset));

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}

	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		jffs2_link_node_ref(c, jeb,
				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* convert to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}

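/* Given a raw node ref in the failing block, find the in-core structure
   (full_dnode, full_dirent or inode metadata) whose ->raw pointer refers to
   it, so that pointer can later be switched to the node's new location.
   Returns the address of that pointer, or NULL if nothing needs adjusting. */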
static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		if (f->metadata && f->metadata->raw == raw) {
			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
			return &f->metadata->raw;
		}
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;

	case JFFS2_NODETYPE_DIRENT:
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		BUG();

	default:
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}

/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write. */

static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
	size_t retlen;
	int ret;
	int nr_refile = 0;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	if (c->wbuf_ofs % c->mtd->erasesize)
		jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
	else
		jffs2_block_refile(c, jeb, REFILE_ANYWAY);
	spin_unlock(&c->erase_completion_lock);

	BUG_ON(!ref_obsolete(jeb->last_node));

	/* Find the first node to be recovered, by skipping over every
	   node which ends before the wbuf starts, or which is obsolete. */
	for (next = raw = jeb->first_node; next; raw = next) {
		next = ref_next(raw);

		if (ref_obsolete(raw) ||
		    (next && ref_offset(next) <= c->wbuf_ofs)) {
			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
				    ref_offset(raw), ref_flags(raw),
				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
				    c->wbuf_ofs);
			continue;
		}
		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
			    ref_offset(raw), ref_flags(raw),
			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));

		first_raw = raw;
		break;
	}

	if (!first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
		c->wbuf_len = 0;
		return;
	}

	start = ref_offset(first_raw);
	end = ref_offset(jeb->last_node);
	nr_refile = 1;

	/* Count the number of refs which need to be copied */
	while ((raw = ref_next(raw)) != jeb->last_node)
		nr_refile++;

	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
		    start, end, end - start, nr_refile);

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */

		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);

		/* ECC recovered ? */
		if ((ret == -EUCLEAN || ret == -EBADMSG) &&
		    (retlen == c->wbuf_ofs - start))
			ret = 0;

		if (ret || retlen != c->wbuf_ofs - start) {
			printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
		read_failed:
			first_raw = ref_next(first_raw);
			nr_refile--;
			while (first_raw && ref_obsolete(first_raw)) {
				first_raw = ref_next(first_raw);
				nr_refile--;
			}

			/* If this was the only node to be recovered, give up */
			if (!first_raw) {
				c->wbuf_len = 0;
				return;
			}

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(first_raw);
			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
				    start, end, end - start, nr_refile);

		} else {
			/* Read succeeded. Copy the remaining data from the wbuf */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}
	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */

	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	/* The summary is not recovered, so it must be disabled for this erase block */
	jffs2_sum_disable_collecting(c->summary);

	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
	if (ret) {
		printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	ofs = write_ofs(c);

	if (end-start >= c->wbuf_pagesize) {
		/* Need to do another write immediately, but it's possible
		   that this is just because the wbuf itself is completely
		   full, and there's nothing earlier read back from the
		   flash. Hence 'buf' isn't necessarily what we're writing
		   from. */
		unsigned char *rewrite_buf = buf?:c->wbuf;
		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

		D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs));

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			c->mtd->write(c->mtd, ofs, towrite, &retlen,
				      brokenbuf);
			ret = -EIO;
		} else
#endif
			ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
					    rewrite_buf);

		if (ret || retlen != towrite) {
			/* Argh. We tried. Really we did. */
			printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			if (retlen)
				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

			return;
		}
		printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);

		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end-start);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
		uint32_t rawlen = ref_totlen(c, jeb, raw);
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref *new_ref;
		struct jffs2_raw_node_ref **adjust_ref = NULL;
		struct jffs2_inode_info *f = NULL;

		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(raw), ref_flags(raw), ofs));

		ic = jffs2_raw_ref_to_ic(raw);

		/* Ick. This XATTR mess should be fixed shortly... */
		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			struct jffs2_xattr_datum *xd = (void *)ic;
			BUG_ON(xd->node != raw);
			adjust_ref = &xd->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
			struct jffs2_xattr_datum *xr = (void *)ic;
			BUG_ON(xr->node != raw);
			adjust_ref = &xr->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
			struct jffs2_raw_node_ref **p = &ic->nodes;

			/* Remove the old node from the per-inode list */
			while (*p && *p != (void *)ic) {
				if (*p == raw) {
					(*p) = (raw->next_in_ino);
					raw->next_in_ino = NULL;
					break;
				}
				p = &((*p)->next_in_ino);
			}

			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
				/* If it's an in-core inode, then we have to adjust any
				   full_dirent or full_dnode structure to point to the
				   new version instead of the old */
				f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink);
				if (IS_ERR(f)) {
					/* Should never happen; it _must_ be present */
					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
						    ic->ino, PTR_ERR(f));
					BUG();
				}
				/* We don't lock f->sem. There's a number of ways we could
				   end up in here with it already being locked, and nobody's
				   going to modify it on us anyway because we hold the
				   alloc_sem. We're only changing one ->raw pointer too,
				   which we can get away with without upsetting readers. */
				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
					    ic->state != INO_STATE_CHECKEDABSENT &&
					    ic->state != INO_STATE_GC)) {
				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
				BUG();
			}
		}

		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

		if (adjust_ref) {
			BUG_ON(*adjust_ref != raw);
			*adjust_ref = new_ref;
		}
		if (f)
			jffs2_gc_release_inode(c, f);

		if (!ref_obsolete(raw)) {
			jeb->dirty_size += rawlen;
			jeb->used_size -= rawlen;
			c->dirty_size += rawlen;
			c->used_size -= rawlen;
			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
			BUG_ON(raw->next_in_ino);
		}
		ofs += rawlen;
	}

	kfree(buf);

	/* Fix up the original jeb now it's on the bad_list */
	if (first_raw == jeb->first_node) {
		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
		list_move(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));

}

/* Meaning of pad argument:
   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
   1: Pad, do not adjust nextblock free_size
   2: Pad, adjust nextblock free_size
*/
#define NOPAD		0
#define PAD_NOACCOUNT	1
#define PAD_ACCOUNTING	2

static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!down_trylock(&c->alloc_sem)) {
		up(&c->alloc_sem);
		printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* Claim the remaining space on the page. This happens if we have a
	   change to a new block, or if fsync forces us to flush the
	   writebuffer. If we have a switch to the next page, we will not
	   have enough remaining space for this. */
	if (pad) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially. This helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		if (c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			      brokenbuf);
		ret = -EIO;
	} else
#endif

	ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);

	if (ret || retlen != c->wbuf_pagesize) {
		if (ret)
			printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret);
		else {
			printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			       retlen, c->wbuf_pagesize);
			ret = -EIO;
		}

		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (wbuf_jeb->free_size < waste) {
			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
			       c->wbuf_ofs, c->wbuf_len, waste);
			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
			       wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* FIXME: that made it count as dirty. Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	memset(c->wbuf, 0xff, c->wbuf_pagesize);
	/* Adjust the write buffer offset, else we get a non-contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}

/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));

	if (!c->wbuf)
		return 0;

	down(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
		up(&c->alloc_sem);
		return 0;
	}

	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		up(&c->alloc_sem);

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			down(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		down(&c->alloc_sem);
	}

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));

	up(&c->alloc_sem);
	return ret;
}

/* Pad write-buffer to end and write it, wasting space. */
int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
{
	int ret;

	if (!c->wbuf)
		return 0;

	down_write(&c->wbuf_sem);
	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	/* retry - maybe wbuf recover left some data in wbuf. */
	if (ret)
		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	up_write(&c->wbuf_sem);

	return ret;
}

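/* Copy as much of 'buf' into the write buffer as will fit, returning the
   number of bytes consumed. Returns 0 when the wbuf is empty and the data
   is at least a full page, so the caller can write it directly instead. */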
static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
			      size_t len)
{
	if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
		return 0;

	if (len > (c->wbuf_pagesize - c->wbuf_len))
		len = c->wbuf_pagesize - c->wbuf_len;
	memcpy(c->wbuf + c->wbuf_len, buf, len);
	c->wbuf_len += (uint32_t) len;
	return len;
}

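/* Gather-write through the write buffer: data is staged in c->wbuf and
   flushed one full page at a time; page-aligned whole pages are written
   straight to flash. */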
int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
		       unsigned long count, loff_t to, size_t *retlen,
		       uint32_t ino)
{
	struct jffs2_eraseblock *jeb;
	size_t wbuf_retlen, donelen = 0;
	uint32_t outvec_to = to;
	int ret, invec;

	/* If not writebuffered flash, don't bother */
	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);

	down_write(&c->wbuf_sem);

	/* If wbuf_ofs is not initialized, set it to target address */
	if (c->wbuf_ofs == 0xFFFFFFFF) {
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
		memset(c->wbuf, 0xff, c->wbuf_pagesize);
	}

	/*
	 * Sanity checks on target address. It's permitted to write
	 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
	 * write at the beginning of a new erase block. Anything else,
	 * and you die. New block starts at xxx000c (0-b = block
	 * header)
	 */
	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
		/* It's a write to a new block */
		if (c->wbuf_len) {
			D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
				  "causes flush of wbuf at 0x%08x\n",
				  (unsigned long)to, c->wbuf_ofs));
			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
			if (ret)
				goto outerr;
		}
		/* set pointer to new block */
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
	}

	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
		/* We're not writing immediately after the writebuffer. Bad. */
		printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
		       "to %08lx\n", (unsigned long)to);
		if (c->wbuf_len)
			printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
			       c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
		BUG();
	}

	/* adjust alignment offset */
	if (c->wbuf_len != PAGE_MOD(to)) {
		c->wbuf_len = PAGE_MOD(to);
		/* take care of alignment to next page */
		if (!c->wbuf_len) {
			c->wbuf_len = c->wbuf_pagesize;
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
	}

	for (invec = 0; invec < count; invec++) {
		int vlen = invecs[invec].iov_len;
		uint8_t *v = invecs[invec].iov_base;

		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);

		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
		vlen -= wbuf_retlen;
		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
		v += wbuf_retlen;

		if (vlen >= c->wbuf_pagesize) {
			ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
					    &wbuf_retlen, v);
			if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
				goto outfile;

			vlen -= wbuf_retlen;
			outvec_to += wbuf_retlen;
			c->wbuf_ofs = outvec_to;
			donelen += wbuf_retlen;
			v += wbuf_retlen;
		}

		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}

		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
	}

	/*
	 * If there's a remainder in the wbuf and it's a non-GC write,
	 * remember that the wbuf affects this ino
	 */
	*retlen = donelen;

	if (jffs2_sum_active()) {
		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
		if (res)
			return res;
	}

	if (c->wbuf_len && ino)
		jffs2_wbuf_dirties_inode(c, ino);

	ret = 0;
	up_write(&c->wbuf_sem);
	return ret;

outfile:
	/*
	 * At this point we have no problem, c->wbuf is empty. However
	 * refile nextblock to avoid writing again to same address.
	 */

	spin_lock(&c->erase_completion_lock);

	jeb = &c->blocks[outvec_to / c->sector_size];
	jffs2_block_refile(c, jeb, REFILE_ANYWAY);

	spin_unlock(&c->erase_completion_lock);

outerr:
	*retlen = 0;
	up_write(&c->wbuf_sem);
	return ret;
}

/*
 * This is the entry point for flash writes.
 * If we work on write-buffered flash (e.g. NAND), build a kvec and write it via writev.
 */
int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
		      size_t *retlen, const u_char *buf)
{
	struct kvec vecs[1];

	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);

	vecs[0].iov_base = (unsigned char *) buf;
	vecs[0].iov_len = len;
	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
}

/*
   Handle readback from writebuffer and ECC failure return
*/
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t	orbf = 0, owbf = 0, lwbf = 0;
	int	ret;

	if (!jffs2_is_writebuffered(c))
		return c->mtd->read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
		if (ret == -EBADMSG)
			printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)"
			       " returned ECC error\n", len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer;
		 * maybe we are lucky and all or parts of the data are correct.
		 * We check the node. If the data is corrupted, the node check
		 * will sort it out. We keep this block; it will fail on write
		 * or erase and then we mark it bad. Or should we do that now?
		 * But we should give it a chance. Maybe we had a system crash
		 * or power loss before the ECC write or an erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* does the read end before the write buffer starts? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf+orbf, c->wbuf+owbf, lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}

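/* Number of pages' worth of free OOB space we read in one go when checking
   whether an eraseblock's OOB area is empty. */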
#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 12 bytes for OOB clean marker */
#define OOB_CM_SIZE 12

static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = constant_cpu_to_je32(8)
};

/*
 * Check if the out-of-band area is empty. This function knows about the
 * cleanmarker; if it is present in OOB, the OOB is treated as empty anyway.
 */
int jffs2_check_oob_empty(struct jffs2_sb_info *c,
			  struct jffs2_eraseblock *jeb, int mode)
{
	int i, ret;
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
	struct mtd_oob_ops ops;

	ops.mode = MTD_OOB_AUTO;
	ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
	if (ret || ops.oobretlen != ops.ooblen) {
		printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
		       " bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret)
			ret = -EIO;
		return ret;
	}

	for (i = 0; i < ops.ooblen; i++) {
		if (mode && i < cmlen)
			/* Yeah, we know about the cleanmarker */
			continue;

		if (ops.oobbuf[i] != 0xFF) {
			D2(printk(KERN_DEBUG "Found %02x at %x in OOB for "
				  "%08x\n", ops.oobbuf[i], i, jeb->offset));
			return 1;
		}
	}

	return 0;
}

/*
 * Check for a valid cleanmarker.
 * Returns: 0 if a valid cleanmarker was found
 *	    1 if no cleanmarker was found
 *	    negative error code if an error occurred
 */
int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb)
{
	struct mtd_oob_ops ops;
	int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);

	ops.mode = MTD_OOB_AUTO;
	ops.ooblen = cmlen;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
	if (ret || ops.oobretlen != ops.ooblen) {
		printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
		       " bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret)
			ret = -EIO;
		return ret;
	}

	return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
}

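/* Write the OOB cleanmarker to a freshly erased eraseblock. */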
int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb)
{
	int ret;
	struct mtd_oob_ops ops;
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);

	ops.mode = MTD_OOB_AUTO;
	ops.ooblen = cmlen;
	ops.oobbuf = (uint8_t *)&oob_cleanmarker;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops);
	if (ret || ops.oobretlen != ops.ooblen) {
		printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd"
		       " bytes, wrote %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret)
			ret = -EIO;
		return ret;
	}

	return 0;
}

/*
 * On NAND we try to mark this block bad. If the block was erased more
 * than MAX_ERASE_FAILURES times, we finally mark it bad.
 * Don't care about failures. This block remains on the erase-pending
 * or badblock list as long as nobody manipulates the flash with
 * a bootloader or something like that.
 */

int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	int ret;

	/* if the erase failure count is still below the limit, keep the block for now */
	if (++jeb->bad_count < MAX_ERASE_FAILURES)
		return 0;

	if (!c->mtd->block_markbad)
		return 1; // What else can we do?

	printk(KERN_WARNING "JFFS2: marking eraseblock at %08x as bad\n", bad_offset);
	ret = c->mtd->block_markbad(c->mtd, bad_offset);

	if (ret) {
		D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
		return ret;
	}
	return 1;
}

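/* Set up write-buffering for NAND flash: the cleanmarker lives in OOB and
   the write buffer is one flash page. */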
int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
{
	struct nand_ecclayout *oinfo = c->mtd->ecclayout;

	if (!c->mtd->oobsize)
		return 0;

	/* Cleanmarker is out-of-band, so inline size zero */
	c->cleanmarker_size = 0;

	if (!oinfo || oinfo->oobavail == 0) {
		printk(KERN_ERR "inconsistent device description\n");
		return -EINVAL;
	}

	D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n"));

	c->oobavail = oinfo->oobavail;

	/* Initialise write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
	if (!c->oobbuf) {
		kfree(c->wbuf);
		return -ENOMEM;
	}

	return 0;
}

void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
{
	kfree(c->wbuf);
	kfree(c->oobbuf);
}

int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;		/* No cleanmarkers needed */

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);

	c->wbuf_pagesize = c->mtd->erasesize;

	/* Find a suitable c->sector_size
	 * - Not too many sectors
	 * - Sectors have to be at least 4 K + some bytes
	 * - All known dataflashes have erase sizes of 528 or 1056
	 * - we take at least 8 eraseblocks and want to have at least 8K size
	 * - The concatenation should be a power of 2
	 */

	c->sector_size = 8 * c->mtd->erasesize;

	while (c->sector_size < 8192) {
		c->sector_size *= 2;
	}

	/* It may be necessary to adjust the flash size */
	c->flash_size = c->mtd->size;

	if ((c->flash_size % c->sector_size) != 0) {
		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
		printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
	}

	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	printk(KERN_INFO "JFFS2 write-buffering enabled, buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);

	return 0;
}

void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}

int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
	/* Cleanmarker currently occupies whole programming regions,
	 * either one or two for 8-byte STMicro flashes. */
	c->cleanmarker_size = max(16u, c->mtd->writesize);

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	return 0;
}

void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}