/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/crc32.h>
#include <linux/mtd/nand.h>
#include <linux/jiffies.h>

#include "nodelist.h"

/* For testing write failures */
#undef BREAKME
#undef BREAKMEHEADER

#ifdef BREAKME
static unsigned char *brokenbuf;
#endif

#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
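/*
 * Worked example (illustrative numbers, not taken from the original source):
 * with c->wbuf_pagesize == 512, PAGE_DIV(0x1234) == 0x1200 and
 * PAGE_MOD(0x1234) == 0x34, i.e. the start of the flash page containing the
 * address and the offset within that page.
 */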

/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES	2

struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

static struct jffs2_inodirty inodirty_nomem;

static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *this = c->wbuf_inodes;

	/* If a malloc failed, consider _everything_ dirty */
	if (this == &inodirty_nomem)
		return 1;

	/* If ino == 0, _any_ non-GC writes mean 'yes' */
	if (this && !ino)
		return 1;

	/* Look to see if the inode in question is pending in the wbuf */
	while (this) {
		if (this->ino == ino)
			return 1;
		this = this->next;
	}
	return 0;
}

static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
{
	struct jffs2_inodirty *this;

	this = c->wbuf_inodes;

	if (this != &inodirty_nomem) {
		while (this) {
			struct jffs2_inodirty *next = this->next;
			kfree(this);
			this = next;
		}
	}
	c->wbuf_inodes = NULL;
}

static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *new;

	/* Mark the superblock dirty so that kupdated will flush... */
	jffs2_erase_pending_trigger(c);

	if (jffs2_wbuf_pending_for_ino(c, ino))
		return;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
		jffs2_clear_wbuf_ino_list(c);
		c->wbuf_inodes = &inodirty_nomem;
		return;
	}
	new->ino = ino;
	new->next = c->wbuf_inodes;
	c->wbuf_inodes = new;
	return;
}

static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
		list_del(this);
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}
#define REFILE_NOTEMPTY 0
#define REFILE_ANYWAY 1

static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	D1(printk("About to refile bad block at %08x\n", jeb->offset));

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}

	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		jffs2_link_node_ref(c, jeb,
				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* convert to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}

static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;
		break;

	case JFFS2_NODETYPE_DIRENT:
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		BUG();
	default:
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}

/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write. */

static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
	size_t retlen;
	int ret;
	int nr_refile = 0;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
	spin_unlock(&c->erase_completion_lock);

	BUG_ON(!ref_obsolete(jeb->last_node));

	/* Find the first node to be recovered, by skipping over every
	   node which ends before the wbuf starts, or which is obsolete. */
	for (next = raw = jeb->first_node; next; raw = next) {
		next = ref_next(raw);

		if (ref_obsolete(raw) ||
		    (next && ref_offset(next) <= c->wbuf_ofs)) {
			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
				    ref_offset(raw), ref_flags(raw),
				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
				    c->wbuf_ofs);
			continue;
		}
		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
			    ref_offset(raw), ref_flags(raw),
			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));

		first_raw = raw;
		break;
	}

	if (!first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
		c->wbuf_len = 0;
		return;
	}

	start = ref_offset(first_raw);
	end = ref_offset(jeb->last_node);
	nr_refile = 1;

	/* Count the number of refs which need to be copied */
	while ((raw = ref_next(raw)) != jeb->last_node)
		nr_refile++;

	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
		    start, end, end - start, nr_refile);

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */

		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);

		if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
			/* ECC recovered */
			ret = 0;
		}
		if (ret || retlen != c->wbuf_ofs - start) {
			printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
	read_failed:
			first_raw = ref_next(first_raw);
			nr_refile--;
			while (first_raw && ref_obsolete(first_raw)) {
				first_raw = ref_next(first_raw);
				nr_refile--;
			}

			/* If this was the only node to be recovered, give up */
			if (!first_raw) {
				c->wbuf_len = 0;
				return;
			}

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(first_raw);
			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
				    start, end, end - start, nr_refile);

		} else {
			/* Read succeeded. Copy the remaining data from the wbuf */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}
	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */

	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
	if (ret) {
		printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	ofs = write_ofs(c);

	if (end-start >= c->wbuf_pagesize) {
		/* Need to do another write immediately, but it's possible
		   that this is just because the wbuf itself is completely
		   full, and there's nothing earlier read back from the
		   flash. Hence 'buf' isn't necessarily what we're writing
		   from. */
		unsigned char *rewrite_buf = buf?:c->wbuf;
		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

		D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs));

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			c->mtd->write(c->mtd, ofs, towrite, &retlen,
				      brokenbuf);
			ret = -EIO;
		} else
#endif
			ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
					    rewrite_buf);

		if (ret || retlen != towrite) {
			/* Argh. We tried. Really we did. */
			printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			if (retlen)
				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

			return;
		}
		printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);

		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end-start);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
		uint32_t rawlen = ref_totlen(c, jeb, raw);
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref *new_ref;
		struct jffs2_raw_node_ref **adjust_ref = NULL;
		struct jffs2_inode_info *f = NULL;

		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(raw), ref_flags(raw), ofs));

		ic = jffs2_raw_ref_to_ic(raw);

		/* Ick. This XATTR mess should be fixed shortly... */
		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			struct jffs2_xattr_datum *xd = (void *)ic;
			BUG_ON(xd->node != raw);
			adjust_ref = &xd->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
			struct jffs2_xattr_datum *xr = (void *)ic;
			BUG_ON(xr->node != raw);
			adjust_ref = &xr->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
			struct jffs2_raw_node_ref **p = &ic->nodes;

			/* Remove the old node from the per-inode list */
			while (*p && *p != (void *)ic) {
				if (*p == raw) {
					(*p) = (raw->next_in_ino);
					raw->next_in_ino = NULL;
					break;
				}
				p = &((*p)->next_in_ino);
			}

			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
				/* If it's an in-core inode, then we have to adjust any
				   full_dirent or full_dnode structure to point to the
				   new version instead of the old */
				f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink);
				if (IS_ERR(f)) {
					/* Should never happen; it _must_ be present */
					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
						    ic->ino, PTR_ERR(f));
					BUG();
				}
				/* We don't lock f->sem. There's a number of ways we could
				   end up in here with it already being locked, and nobody's
				   going to modify it on us anyway because we hold the
				   alloc_sem. We're only changing one ->raw pointer too,
				   which we can get away with without upsetting readers. */
				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
					    ic->state != INO_STATE_CHECKEDABSENT &&
					    ic->state != INO_STATE_GC)) {
				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
				BUG();
			}
		}

		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

		if (adjust_ref) {
			BUG_ON(*adjust_ref != raw);
			*adjust_ref = new_ref;
		}
		if (f)
			jffs2_gc_release_inode(c, f);

		if (!ref_obsolete(raw)) {
			jeb->dirty_size += rawlen;
			jeb->used_size -= rawlen;
			c->dirty_size += rawlen;
			c->used_size -= rawlen;
			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
			BUG_ON(raw->next_in_ino);
		}
		ofs += rawlen;
	}

	kfree(buf);

	/* Fix up the original jeb now it's on the bad_list */
	if (first_raw == jeb->first_node) {
		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
		list_del(&jeb->list);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));

}

/* Meaning of pad argument:
   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
   1: Pad, do not adjust nextblock free_size
   2: Pad, adjust nextblock free_size
*/
#define NOPAD		0
#define PAD_NOACCOUNT	1
#define PAD_ACCOUNTING	2

static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!down_trylock(&c->alloc_sem)) {
		up(&c->alloc_sem);
		printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* Claim the remaining space on the page. This happens if we have a change
	   to a new block, or if fsync forces us to flush the writebuffer.
	   If we have a switch to the next page, we will not have enough remaining
	   space for this. */
	if (pad) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially. This helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		if (c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			      brokenbuf);
		ret = -EIO;
	} else
#endif

		ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);

	if (ret || retlen != c->wbuf_pagesize) {
		if (ret)
			printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
		else {
			printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			       retlen, c->wbuf_pagesize);
			ret = -EIO;
		}

		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (wbuf_jeb->free_size < waste) {
			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
			       c->wbuf_ofs, c->wbuf_len, waste);
			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
			       wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* FIXME: that made it count as dirty. Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	memset(c->wbuf,0xff,c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}

/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));

	if (!c->wbuf)
		return 0;

	down(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
		up(&c->alloc_sem);
		return 0;
	}

	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		up(&c->alloc_sem);

		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			down(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		down(&c->alloc_sem);
	}

	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));

	up(&c->alloc_sem);
	return ret;
}

/* Pad write-buffer to end and write it, wasting space. */
int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
{
	int ret;

	if (!c->wbuf)
		return 0;

	down_write(&c->wbuf_sem);
	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	/* retry - maybe wbuf recover left some data in wbuf. */
	if (ret)
		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	up_write(&c->wbuf_sem);

	return ret;
}

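/*
 * Descriptive note (added for clarity): jffs2_fill_wbuf() copies as much of
 * 'buf' as fits into the write buffer and returns the number of bytes it
 * consumed. A return of 0 when the wbuf is empty and the request is at least
 * a full page tells the caller to write that chunk directly to flash instead.
 * Illustrative numbers, not from the original source: with wbuf_pagesize ==
 * 2048 and wbuf_len == 100, a 4000-byte request copies 1948 bytes into the
 * wbuf and leaves 2052 bytes for the caller to handle.
 */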
static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
			      size_t len)
{
	if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
		return 0;

	if (len > (c->wbuf_pagesize - c->wbuf_len))
		len = c->wbuf_pagesize - c->wbuf_len;
	memcpy(c->wbuf + c->wbuf_len, buf, len);
	c->wbuf_len += (uint32_t) len;
	return len;
}

int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
		       unsigned long count, loff_t to, size_t *retlen,
		       uint32_t ino)
{
	struct jffs2_eraseblock *jeb;
	size_t wbuf_retlen, donelen = 0;
	uint32_t outvec_to = to;
	int ret, invec;

	/* If not writebuffered flash, don't bother */
	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);

	down_write(&c->wbuf_sem);

	/* If wbuf_ofs is not initialized, set it to target address */
	if (c->wbuf_ofs == 0xFFFFFFFF) {
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
		memset(c->wbuf,0xff,c->wbuf_pagesize);
	}

	/*
	 * Sanity checks on target address. It's permitted to write
	 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
	 * write at the beginning of a new erase block. Anything else,
	 * and you die. New block starts at xxx000c (0-b = block
	 * header)
	 */
	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
		/* It's a write to a new block */
		if (c->wbuf_len) {
			D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
				  "causes flush of wbuf at 0x%08x\n",
				  (unsigned long)to, c->wbuf_ofs));
			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
			if (ret)
				goto outerr;
		}
		/* set pointer to new block */
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
	}

	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
		/* We're not writing immediately after the writebuffer. Bad. */
		printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
		       "to %08lx\n", (unsigned long)to);
		if (c->wbuf_len)
			printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
			       c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
		BUG();
	}

	/* adjust alignment offset */
	if (c->wbuf_len != PAGE_MOD(to)) {
		c->wbuf_len = PAGE_MOD(to);
		/* take care of alignment to next page */
		if (!c->wbuf_len) {
			c->wbuf_len = c->wbuf_pagesize;
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
	}

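	/*
	 * Descriptive summary of the loop below (added for clarity): top up
	 * the wbuf from the current vector and flush it if it became full,
	 * then write any remaining whole pages straight to flash, and finally
	 * stash the sub-page tail back into the wbuf for a later flush.
	 */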
	for (invec = 0; invec < count; invec++) {
		int vlen = invecs[invec].iov_len;
		uint8_t *v = invecs[invec].iov_base;

		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);

		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
		vlen -= wbuf_retlen;
		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
		v += wbuf_retlen;

		if (vlen >= c->wbuf_pagesize) {
			ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
					    &wbuf_retlen, v);
			if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
				goto outfile;

			vlen -= wbuf_retlen;
			outvec_to += wbuf_retlen;
			c->wbuf_ofs = outvec_to;
			donelen += wbuf_retlen;
			v += wbuf_retlen;
		}

		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}

		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
	}

	/*
	 * If there's a remainder in the wbuf and it's a non-GC write,
	 * remember that the wbuf affects this ino
	 */
	*retlen = donelen;

	if (jffs2_sum_active()) {
		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
		if (res)
			return res;
	}

	if (c->wbuf_len && ino)
		jffs2_wbuf_dirties_inode(c, ino);

	ret = 0;
	up_write(&c->wbuf_sem);
	return ret;

outfile:
	/*
	 * At this point we have no problem, c->wbuf is empty. However
	 * refile nextblock to avoid writing again to same address.
	 */

	spin_lock(&c->erase_completion_lock);

	jeb = &c->blocks[outvec_to / c->sector_size];
	jffs2_block_refile(c, jeb, REFILE_ANYWAY);

	spin_unlock(&c->erase_completion_lock);

outerr:
	*retlen = 0;
	up_write(&c->wbuf_sem);
	return ret;
}

/*
 *	This is the entry point for flash writes.
 *	Check if we are working on NAND flash; if so, build a kvec and write it via writev.
 */
int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
		      size_t *retlen, const u_char *buf)
{
	struct kvec vecs[1];

	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);

	vecs[0].iov_base = (unsigned char *) buf;
	vecs[0].iov_len = len;
	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
}

/*
	Handle readback from writebuffer and ECC failure return
*/
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t	orbf = 0, owbf = 0, lwbf = 0;
	int	ret;

	if (!jffs2_is_writebuffered(c))
		return c->mtd->read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG) && (*retlen == len) ) {
		printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
		       len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer;
		 * maybe we are lucky and all data or parts are correct. We
		 * check the node. If data is corrupted, the node check will
		 * sort it out. We keep this block; it will fail on write or
		 * erase and then we mark it bad. Or should we do that now?
		 * But we should give it a chance. Maybe we had a system
		 * crash or power loss before the ECC write or an erase was
		 * completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

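	/*
	 * Overlay any bytes still held in the write buffer over the data just
	 * read from flash. Illustrative numbers, not from the original source:
	 * with wbuf_ofs == 0x1000 and wbuf_len == 0x200, a read of 0x100 bytes
	 * at ofs == 0x1080 gives owbf == 0x80 and lwbf == 0x100, so the whole
	 * result is copied from the wbuf.
	 */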
	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf+orbf,c->wbuf+owbf,lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}

/*
 *	Check if the out-of-band area is empty
 */
int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode)
{
	unsigned char *buf;
	int	ret = 0;
	int	i, len, page;
	size_t	retlen;
	int	oob_size;

	/* allocate a buffer for all oob data in this sector */
	oob_size = c->mtd->oobsize;
	len = 4 * oob_size;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf) {
		printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
		return -ENOMEM;
	}
	/*
	 * If mode = 0, we scan for a totally empty oob area; otherwise we have
	 * to take care of the cleanmarker in the first page of the block.
	 */
	ret = jffs2_flash_read_oob(c, jeb->offset, len, &retlen, buf);
	if (ret) {
		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
		goto out;
	}

	if (retlen < len) {
		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read "
			  "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
		ret = -EIO;
		goto out;
	}

	/* Special check for first page */
	for(i = 0; i < oob_size ; i++) {
		/* Yeah, we know about the cleanmarker. */
		if (mode && i >= c->fsdata_pos &&
		    i < c->fsdata_pos + c->fsdata_len)
			continue;

		if (buf[i] != 0xFF) {
			D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
				  buf[i], i, jeb->offset));
			ret = 1;
			goto out;
		}
	}

	/* we know, we are aligned :) */
	for (page = oob_size; page < len; page += sizeof(long)) {
		unsigned long dat = *(unsigned long *)(&buf[page]);
		if(dat != -1) {
			ret = 1;
			goto out;
		}
	}

out:
	kfree(buf);

	return ret;
}

/*
 * Scan for a valid cleanmarker and for bad blocks.
 * For virtual blocks (concatenated physical blocks) check the cleanmarker
 * only in the first page of the first physical block, but scan for bad
 * blocks in all physical blocks.
 */
int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	struct jffs2_unknown_node n;
	unsigned char buf[2 * NAND_MAX_OOBSIZE];
	unsigned char *p;
	int ret, i, cnt, retval = 0;
	size_t retlen, offset;
	int oob_size;

	offset = jeb->offset;
	oob_size = c->mtd->oobsize;

	/* Loop through the physical blocks */
	for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
		/* Check first if the block is bad. */
		if (c->mtd->block_isbad (c->mtd, offset)) {
			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
			return 2;
		}
		/*
		 * We read oob data from page 0 and 1 of the block.
		 * page 0 contains cleanmarker and badblock info
		 * page 1 contains failure count of this block
		 */
		ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf);

		if (ret) {
			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
			return ret;
		}
		if (retlen < (oob_size << 1)) {
			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset));
			return -EIO;
		}

		/* Check cleanmarker only on the first physical block */
		if (!cnt) {
			n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
			n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
			n.totlen = cpu_to_je32 (8);
			p = (unsigned char *) &n;

			for (i = 0; i < c->fsdata_len; i++) {
				if (buf[c->fsdata_pos + i] != p[i]) {
					retval = 1;
				}
			}
			D1(if (retval == 1) {
				printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
				printk(KERN_WARNING "OOB at %08zx was ", offset);
				for (i=0; i < oob_size; i++) {
					printk("%02x ", buf[i]);
				}
				printk("\n");
			})
		}
		offset += c->mtd->erasesize;
	}
	return retval;
}

int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	struct jffs2_unknown_node n;
	int	ret;
	size_t	retlen;

	n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
	n.totlen = cpu_to_je32(8);

	ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);

	if (ret) {
		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
		return ret;
	}
	if (retlen != c->fsdata_len) {
		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len));
		return ret;
	}
	return 0;
}

/*
 * On NAND we try to mark this block bad. If the block has failed to erase
 * MAX_ERASE_FAILURES times, we finally mark it bad.
 * Don't care about failures. This block remains on the erase-pending
 * or badblock list as long as nobody manipulates the flash with
 * a bootloader or something like that.
 */

int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	int	ret;

	/* if the count is < max, we try to write the counter to the 2nd page oob area */
	if( ++jeb->bad_count < MAX_ERASE_FAILURES)
		return 0;

	if (!c->mtd->block_markbad)
		return 1; // What else can we do?

	D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
	ret = c->mtd->block_markbad(c->mtd, bad_offset);

	if (ret) {
		D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
		return ret;
	}
	return 1;
}

#define NAND_JFFS2_OOB16_FSDALEN	8

static struct nand_oobinfo jffs2_oobinfo_docecc = {
	.useecc = MTD_NANDECC_PLACE,
	.eccbytes = 6,
	.eccpos = {0,1,2,3,4,5}
};


static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
{
	struct nand_oobinfo *oinfo = &c->mtd->oobinfo;

	/* Do this only, if we have an oob buffer */
	if (!c->mtd->oobsize)
		return 0;

	/* Cleanmarker is out-of-band, so inline size zero */
	c->cleanmarker_size = 0;

	/* Should we use autoplacement ? */
	if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
		D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
		/* Get the position of the free bytes */
		if (!oinfo->oobfree[0][1]) {
			printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
			return -ENOSPC;
		}
		c->fsdata_pos = oinfo->oobfree[0][0];
		c->fsdata_len = oinfo->oobfree[0][1];
		if (c->fsdata_len > 8)
			c->fsdata_len = 8;
	} else {
		/* This is just a legacy fallback and should go away soon */
		switch(c->mtd->ecctype) {
		case MTD_ECC_RS_DiskOnChip:
			printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
			c->oobinfo = &jffs2_oobinfo_docecc;
			c->fsdata_pos = 6;
			c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
			c->badblock_pos = 15;
			break;

		default:
			D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
			return -EINVAL;
		}
	}
	return 0;
}

int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
{
	int res;

	/* Initialise write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	res = jffs2_nand_set_oobinfo(c);

#ifdef BREAKME
	if (!brokenbuf)
		brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!brokenbuf) {
		kfree(c->wbuf);
		return -ENOMEM;
	}
	memset(brokenbuf, 0xdb, c->wbuf_pagesize);
#endif
	return res;
}

void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
{
	kfree(c->wbuf);
}

int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;		/* No cleanmarkers needed */

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);


	c->wbuf_pagesize = c->mtd->erasesize;

	/* Find a suitable c->sector_size
	 * - Not too many sectors
	 * - Sectors have to be at least 4 KiB + some bytes
	 * - All known dataflashes have erase sizes of 528 or 1056
	 * - we take at least 8 eraseblocks and want to have at least 8 KiB size
	 * - The concatenation should be a power of 2
	 */

	c->sector_size = 8 * c->mtd->erasesize;

	while (c->sector_size < 8192) {
		c->sector_size *= 2;
	}
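	/*
	 * For example (derived from the constraints above): an erasesize of
	 * 1056 gives 8 * 1056 = 8448 >= 8192, so sector_size stays at 8448
	 * (8 eraseblocks); an erasesize of 528 starts at 4224 and doubles
	 * once to 8448 (16 eraseblocks).
	 */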

	/* It may be necessary to adjust the flash size */
	c->flash_size = c->mtd->size;

	if ((c->flash_size % c->sector_size) != 0) {
		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
		printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
	};

	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);

	return 0;
}

void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}

int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
	/* Cleanmarker currently occupies whole programming regions,
	 * either one or 2 for 8Byte STMicro flashes. */
	c->cleanmarker_size = max(16u, c->mtd->writesize);

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	return 0;
}

void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}