blob: 89a6ec0fa7662c855806b942ad14ddc199537646 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
David Woodhousec00c3102007-04-25 14:16:47 +01004 * Copyright © 2001-2007 Red Hat, Inc.
5 * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de>
Linus Torvalds1da177e2005-04-16 15:20:36 -07006 *
7 * Created by David Woodhouse <dwmw2@infradead.org>
8 * Modified debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
9 *
10 * For licensing information, see the file 'LICENCE' in this directory.
11 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070012 */
13
14#include <linux/kernel.h>
15#include <linux/slab.h>
16#include <linux/mtd/mtd.h>
17#include <linux/crc32.h>
18#include <linux/mtd/nand.h>
Tim Schmielau4e57b682005-10-30 15:03:48 -080019#include <linux/jiffies.h>
Al Viro914e2632006-10-18 13:55:46 -040020#include <linux/sched.h>
Tim Schmielau4e57b682005-10-30 15:03:48 -080021
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include "nodelist.h"
23
24/* For testing write failures */
25#undef BREAKME
26#undef BREAKMEHEADER
27
28#ifdef BREAKME
29static unsigned char *brokenbuf;
30#endif
31
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +010032#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
33#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
34
Linus Torvalds1da177e2005-04-16 15:20:36 -070035/* max. erase failures before we mark a block bad */
36#define MAX_ERASE_FAILURES 2
37
/* One entry in the singly-linked list of inodes that have data sitting
   in the write buffer but not yet on flash. */
struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

/* Statically-allocated sentinel: c->wbuf_inodes is pointed here when a
   list-node allocation fails, meaning "consider every inode dirty".
   It must never be kfree()d. */
static struct jffs2_inodirty inodirty_nomem;
44
45static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
46{
47 struct jffs2_inodirty *this = c->wbuf_inodes;
48
49 /* If a malloc failed, consider _everything_ dirty */
50 if (this == &inodirty_nomem)
51 return 1;
52
53 /* If ino == 0, _any_ non-GC writes mean 'yes' */
54 if (this && !ino)
55 return 1;
56
57 /* Look to see if the inode in question is pending in the wbuf */
58 while (this) {
59 if (this->ino == ino)
60 return 1;
61 this = this->next;
62 }
63 return 0;
64}
65
66static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
67{
68 struct jffs2_inodirty *this;
69
70 this = c->wbuf_inodes;
71
72 if (this != &inodirty_nomem) {
73 while (this) {
74 struct jffs2_inodirty *next = this->next;
75 kfree(this);
76 this = next;
77 }
78 }
79 c->wbuf_inodes = NULL;
80}
81
82static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
83{
84 struct jffs2_inodirty *new;
85
86 /* Mark the superblock dirty so that kupdated will flush... */
Joakim Tjernlund64a5c2e2010-05-19 17:13:19 +010087 jffs2_dirty_trigger(c);
Linus Torvalds1da177e2005-04-16 15:20:36 -070088
89 if (jffs2_wbuf_pending_for_ino(c, ino))
90 return;
91
92 new = kmalloc(sizeof(*new), GFP_KERNEL);
93 if (!new) {
Joe Perches9c261b32012-02-15 15:56:43 -080094 jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070095 jffs2_clear_wbuf_ino_list(c);
96 c->wbuf_inodes = &inodirty_nomem;
97 return;
98 }
99 new->ino = ino;
100 new->next = c->wbuf_inodes;
101 c->wbuf_inodes = new;
102 return;
103}
104
105static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
106{
107 struct list_head *this, *next;
108 static int n;
109
110 if (list_empty(&c->erasable_pending_wbuf_list))
111 return;
112
113 list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
114 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
115
Joe Perches9c261b32012-02-15 15:56:43 -0800116 jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
117 jeb->offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118 list_del(this);
119 if ((jiffies + (n++)) & 127) {
120 /* Most of the time, we just erase it immediately. Otherwise we
121 spend ages scanning it on mount, etc. */
Joe Perches9c261b32012-02-15 15:56:43 -0800122 jffs2_dbg(1, "...and adding to erase_pending_list\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123 list_add_tail(&jeb->list, &c->erase_pending_list);
124 c->nr_erasing_blocks++;
David Woodhouseae3b6ba2010-05-19 17:05:14 +0100125 jffs2_garbage_collect_trigger(c);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126 } else {
127 /* Sometimes, however, we leave it elsewhere so it doesn't get
128 immediately reused, and we spread the load a bit. */
Joe Perches9c261b32012-02-15 15:56:43 -0800129 jffs2_dbg(1, "...and adding to erasable_list\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130 list_add_tail(&jeb->list, &c->erasable_list);
131 }
132 }
133}
134
/* allow_empty argument for jffs2_block_refile() */
#define REFILE_NOTEMPTY 0	/* block must contain nodes; BUG() if empty */
#define REFILE_ANYWAY 1		/* an empty block is acceptable */

/*
 * Take a block that suffered a write failure off the normal lists and
 * refile it: onto bad_used_list if it still holds nodes, otherwise onto
 * erase_pending_list.  The unwritten tail of the block is accounted as
 * wasted space.
 *
 * NOTE(review): callers appear responsible for holding
 * erase_completion_lock — the *_nolock debug checks at the end suggest
 * so; confirm against callers.
 */
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	/* Cover the remaining free space of the block with an obsolete ref
	   so nothing else tries to write there, then move that space from
	   the dirty to the wasted accounting buckets. */
	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		jffs2_link_node_ref(c, jeb,
				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* convert to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}
178
/*
 * Locate the in-core structure (f->metadata, a fragtree full_dnode, or a
 * full_dirent) whose ->raw pointer refers to @raw, and return the address
 * of that pointer so the caller can redirect it to a replacement ref.
 * @node is the on-media image of the node, used to work out its type and,
 * for inodes, its file offset.  Returns NULL for node types whose raw
 * pointers need no fixup.
 */
static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	/* Magic must be the JFFS2 magic (0x1985) or all-zero (padding) */
	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		/* The metadata node is kept separately from the fragtree */
		if (f->metadata && f->metadata->raw == raw) {
			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
			return &f->metadata->raw;
		}
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;

	case JFFS2_NODETYPE_DIRENT:
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		/* A dirent node must be on the dents list; anything else
		   indicates corrupted in-core state. */
		BUG();

	default:
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}
225
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
/* Read one flash page at @ofs back into c->wbuf_verify and compare it
   with @buf, the data we just wrote.  Returns 0 on a clean match,
   otherwise a negative error after dumping both images. */
static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
			      uint32_t ofs)
{
	size_t retlen;
	char *eccstr;
	int ret;

	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
	/* ECC "corrected"/"uncorrectable" results still return data; only
	   other errors abort the verification outright. */
	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
		pr_warn("%s(): Read back of page at %08x failed: %d\n",
			__func__, c->wbuf_ofs, ret);
		return ret;
	}
	if (retlen != c->wbuf_pagesize) {
		pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
			__func__, ofs, retlen, c->wbuf_pagesize);
		return -EIO;
	}

	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
		return 0;

	/* Mismatch: describe the ECC state and dump both buffers */
	switch (ret) {
	case -EUCLEAN:
		eccstr = "corrected";
		break;
	case -EBADMSG:
		eccstr = "correction failed";
		break;
	default:
		eccstr = "OK or unused";
		break;
	}

	pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
		eccstr, c->wbuf_ofs);
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf, c->wbuf_pagesize, 0);

	pr_warn("Read back:\n");
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf_verify, c->wbuf_pagesize, 0);

	return -EIO;
}
#else
#define jffs2_verify_write(c,b,o) (0)
#endif
268
/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write.
 *
 * Overall strategy: refile the failing block as bad, work out which
 * nodes in it are affected, reserve space in a fresh block, rewrite the
 * recoverable data there, and then migrate every raw_node_ref (and any
 * in-core pointers to them) from the old block to the new one.
 */

static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
	size_t retlen;
	int ret;
	int nr_refile = 0;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	/* The block containing the write buffer's target offset */
	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	/* If the wbuf sits mid-eraseblock, the block must already contain
	   nodes; only a wbuf at an eraseblock boundary may be empty. */
	if (c->wbuf_ofs % c->mtd->erasesize)
		jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
	else
		jffs2_block_refile(c, jeb, REFILE_ANYWAY);
	spin_unlock(&c->erase_completion_lock);

	BUG_ON(!ref_obsolete(jeb->last_node));

	/* Find the first node to be recovered, by skipping over every
	   node which ends before the wbuf starts, or which is obsolete. */
	for (next = raw = jeb->first_node; next; raw = next) {
		next = ref_next(raw);

		if (ref_obsolete(raw) ||
		    (next && ref_offset(next) <= c->wbuf_ofs)) {
			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
				    ref_offset(raw), ref_flags(raw),
				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
				    c->wbuf_ofs);
			continue;
		}
		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
			    ref_offset(raw), ref_flags(raw),
			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));

		first_raw = raw;
		break;
	}

	if (!first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
		c->wbuf_len = 0;
		return;
	}

	/* Byte range to rewrite and the number of refs it spans */
	start = ref_offset(first_raw);
	end = ref_offset(jeb->last_node);
	nr_refile = 1;

	/* Count the number of refs which need to be copied */
	while ((raw = ref_next(raw)) != jeb->last_node)
		nr_refile++;

	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
		    start, end, end - start, nr_refile);

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */

		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
			       buf);

		/* ECC recovered ? */
		if ((ret == -EUCLEAN || ret == -EBADMSG) &&
		    (retlen == c->wbuf_ofs - start))
			ret = 0;

		if (ret || retlen != c->wbuf_ofs - start) {
			pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
			/* NOTE: the label below is jumped to from the OOM
			   path above — both failure modes drop the partially
			   written first node(s) and recover only what is
			   still complete in the wbuf. */
		read_failed:
			first_raw = ref_next(first_raw);
			nr_refile--;
			/* Skip any obsolete refs at the new start */
			while (first_raw && ref_obsolete(first_raw)) {
				first_raw = ref_next(first_raw);
				nr_refile--;
			}

			/* If this was the only node to be recovered, give up */
			if (!first_raw) {
				c->wbuf_len = 0;
				return;
			}

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(first_raw);
			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
				    start, end, end - start, nr_refile);

		} else {
			/* Read succeeded. Copy the remaining data from the wbuf */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}
	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */

	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	/* The summary is not recovered, so it must be disabled for this erase block */
	jffs2_sum_disable_collecting(c->summary);

	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
	if (ret) {
		pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	ofs = write_ofs(c);

	if (end-start >= c->wbuf_pagesize) {
		/* Need to do another write immediately, but it's possible
		   that this is just because the wbuf itself is completely
		   full, and there's nothing earlier read back from the
		   flash. Hence 'buf' isn't necessarily what we're writing
		   from. */
		unsigned char *rewrite_buf = buf?:c->wbuf;
		/* Write only whole pages now; the remainder stays buffered */
		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

		jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs);

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			pr_notice("Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
			ret = -EIO;
		} else
#endif
			ret = mtd_write(c->mtd, ofs, towrite, &retlen,
					rewrite_buf);

		if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
			/* Argh. We tried. Really we did. */
			pr_crit("Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			/* Something may have hit the flash; mark it obsolete */
			if (retlen)
				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

			return;
		}
		pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);

		/* Keep the sub-page tail in the wbuf for the next flush */
		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end-start);
		} else {
			/* Regions may overlap within the wbuf itself */
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
		uint32_t rawlen = ref_totlen(c, jeb, raw);
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref *new_ref;
		struct jffs2_raw_node_ref **adjust_ref = NULL;
		struct jffs2_inode_info *f = NULL;

		jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(raw), ref_flags(raw), ofs);

		ic = jffs2_raw_ref_to_ic(raw);

		/* Ick. This XATTR mess should be fixed shortly... */
		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			struct jffs2_xattr_datum *xd = (void *)ic;
			BUG_ON(xd->node != raw);
			adjust_ref = &xd->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
			struct jffs2_xattr_datum *xr = (void *)ic;
			BUG_ON(xr->node != raw);
			adjust_ref = &xr->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
			struct jffs2_raw_node_ref **p = &ic->nodes;

			/* Remove the old node from the per-inode list */
			while (*p && *p != (void *)ic) {
				if (*p == raw) {
					(*p) = (raw->next_in_ino);
					raw->next_in_ino = NULL;
					break;
				}
				p = &((*p)->next_in_ino);
			}

			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
				/* If it's an in-core inode, then we have to adjust any
				   full_dirent or full_dnode structure to point to the
				   new version instead of the old */
				f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
				if (IS_ERR(f)) {
					/* Should never happen; it _must_ be present */
					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
						    ic->ino, PTR_ERR(f));
					BUG();
				}
				/* We don't lock f->sem. There's a number of ways we could
				   end up in here with it already being locked, and nobody's
				   going to modify it on us anyway because we hold the
				   alloc_sem. We're only changing one ->raw pointer too,
				   which we can get away with without upsetting readers. */
				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
					    ic->state != INO_STATE_CHECKEDABSENT &&
					    ic->state != INO_STATE_GC)) {
				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
				BUG();
			}
		}

		/* Create the replacement ref in the new block */
		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

		if (adjust_ref) {
			BUG_ON(*adjust_ref != raw);
			*adjust_ref = new_ref;
		}
		if (f)
			jffs2_gc_release_inode(c, f);

		/* Obsolete the old ref and move its space accounting from
		   used to dirty in the failing block */
		if (!ref_obsolete(raw)) {
			jeb->dirty_size += rawlen;
			jeb->used_size -= rawlen;
			c->dirty_size += rawlen;
			c->used_size -= rawlen;
			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
			BUG_ON(raw->next_in_ino);
		}
		ofs += rawlen;
	}

	kfree(buf);

	/* Fix up the original jeb now it's on the bad_list */
	if (first_raw == jeb->first_node) {
		jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
			  jeb->offset);
		list_move(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
		  c->wbuf_ofs, c->wbuf_len);

}
567
/* Meaning of pad argument:
   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
   1: Pad, do not adjust nextblock free_size
   2: Pad, adjust nextblock free_size
*/
#define NOPAD		0
#define PAD_NOACCOUNT	1
#define PAD_ACCOUNTING	2

/*
 * Write the current contents of the write buffer out to flash, padding
 * to a full page if requested, and account the padding as wasted space.
 * On write failure, hands off to jffs2_wbuf_recover().  Returns 0 on
 * success or a negative error.  Caller must hold c->alloc_sem.
 */
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!mutex_is_locked(&c->alloc_sem)) {
		pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	/* Ensure enough raw_node_refs exist up front, so the post-write
	   accounting below cannot fail. */
	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* claim remaining space on the page
	   this happens, if we have a change to a new block,
	   or if fsync forces us to flush the writebuffer.
	   if we have a switch to next page, we will not have
	   enough remaining space for this.
	*/
	if (pad ) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		/* If there's room, write a well-formed padding node so the
		   scanner can skip the tail cleanly on the next mount. */
		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			  brokenbuf);
		ret = -EIO;
	} else
#endif

		ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
				&retlen, c->wbuf);

	/* Any failure (error, short write, or verify mismatch) funnels
	   through the wfail label into wbuf recovery. */
	if (ret) {
		pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
		goto wfail;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			retlen, c->wbuf_pagesize);
		ret = -EIO;
		goto wfail;
	} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
	wfail:
		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb == c->nextblock) ? "next" : "",
			  wbuf_jeb->offset);

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (wbuf_jeb->free_size < waste) {
			pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
				c->wbuf_ofs, c->wbuf_len, waste);
			pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
				wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		/* Record the padding as an obsolete ref, then reclassify it
		   from dirty to wasted space. */
		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* FIXME: that made it count as dirty. Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	/* Reset the wbuf to all-0xff (erased flash state) for the next page */
	memset(c->wbuf,0xff,c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}
694
/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);

	/* No write buffer at all (e.g. plain NOR): nothing to flush. */
	if (!c->wbuf)
		return 0;

	mutex_lock(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
		mutex_unlock(&c->alloc_sem);
		return 0;
	}

	/* Snapshot the wbuf position so we can tell, after each GC pass,
	   whether the pass caused the buffer to be written out (the offset
	   moves once a flush happens). */
	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while: pad and flush
		   directly instead of waiting for garbage collection. */
		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
			  __func__);
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		/* alloc_sem must be dropped across the GC pass: GC itself
		   takes it to allocate space for the nodes it moves. */
		mutex_unlock(&c->alloc_sem);

		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			mutex_lock(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		mutex_lock(&c->alloc_sem);
	}

	jffs2_dbg(1, "%s(): ends...\n", __func__);

	mutex_unlock(&c->alloc_sem);
	return ret;
}
759
760/* Pad write-buffer to end and write it, wasting space. */
761int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
762{
763 int ret;
764
David Woodhouse8aee6ac2005-02-02 22:12:08 +0000765 if (!c->wbuf)
766 return 0;
767
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768 down_write(&c->wbuf_sem);
769 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
Estelle Hammache7f716cf2005-01-24 21:24:18 +0000770 /* retry - maybe wbuf recover left some data in wbuf. */
771 if (ret)
772 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773 up_write(&c->wbuf_sem);
774
775 return ret;
776}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200778static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
779 size_t len)
780{
781 if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
782 return 0;
783
784 if (len > (c->wbuf_pagesize - c->wbuf_len))
785 len = c->wbuf_pagesize - c->wbuf_len;
786 memcpy(c->wbuf + c->wbuf_len, buf, len);
787 c->wbuf_len += (uint32_t) len;
788 return len;
789}
790
791int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
792 unsigned long count, loff_t to, size_t *retlen,
793 uint32_t ino)
794{
795 struct jffs2_eraseblock *jeb;
796 size_t wbuf_retlen, donelen = 0;
797 uint32_t outvec_to = to;
798 int ret, invec;
799
800 /* If not writebuffered flash, don't bother */
Andrew Victor3be36672005-02-09 09:09:05 +0000801 if (!jffs2_is_writebuffered(c))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700802 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000803
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804 down_write(&c->wbuf_sem);
805
806 /* If wbuf_ofs is not initialized, set it to target address */
807 if (c->wbuf_ofs == 0xFFFFFFFF) {
808 c->wbuf_ofs = PAGE_DIV(to);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000809 c->wbuf_len = PAGE_MOD(to);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810 memset(c->wbuf,0xff,c->wbuf_pagesize);
811 }
812
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200813 /*
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200814 * Sanity checks on target address. It's permitted to write
815 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
816 * write at the beginning of a new erase block. Anything else,
817 * and you die. New block starts at xxx000c (0-b = block
818 * header)
819 */
Andrew Victor3be36672005-02-09 09:09:05 +0000820 if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700821 /* It's a write to a new block */
822 if (c->wbuf_len) {
Joe Perches9c261b32012-02-15 15:56:43 -0800823 jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
824 __func__, (unsigned long)to, c->wbuf_ofs);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700825 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200826 if (ret)
827 goto outerr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700828 }
829 /* set pointer to new block */
830 c->wbuf_ofs = PAGE_DIV(to);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000831 c->wbuf_len = PAGE_MOD(to);
832 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833
834 if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
835 /* We're not writing immediately after the writebuffer. Bad. */
Joe Perchesda320f02012-02-15 15:56:44 -0800836 pr_crit("%s(): Non-contiguous write to %08lx\n",
837 __func__, (unsigned long)to);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838 if (c->wbuf_len)
Joe Perchesda320f02012-02-15 15:56:44 -0800839 pr_crit("wbuf was previously %08x-%08x\n",
840 c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841 BUG();
842 }
843
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200844 /* adjust alignment offset */
845 if (c->wbuf_len != PAGE_MOD(to)) {
846 c->wbuf_len = PAGE_MOD(to);
847 /* take care of alignment to next page */
848 if (!c->wbuf_len) {
849 c->wbuf_len = c->wbuf_pagesize;
850 ret = __jffs2_flush_wbuf(c, NOPAD);
851 if (ret)
852 goto outerr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853 }
854 }
855
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200856 for (invec = 0; invec < count; invec++) {
857 int vlen = invecs[invec].iov_len;
858 uint8_t *v = invecs[invec].iov_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700859
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200860 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200862 if (c->wbuf_len == c->wbuf_pagesize) {
863 ret = __jffs2_flush_wbuf(c, NOPAD);
864 if (ret)
865 goto outerr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866 }
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200867 vlen -= wbuf_retlen;
868 outvec_to += wbuf_retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700869 donelen += wbuf_retlen;
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200870 v += wbuf_retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200872 if (vlen >= c->wbuf_pagesize) {
Artem Bityutskiyeda95cb2011-12-23 17:35:41 +0200873 ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
874 &wbuf_retlen, v);
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200875 if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
876 goto outfile;
877
878 vlen -= wbuf_retlen;
879 outvec_to += wbuf_retlen;
880 c->wbuf_ofs = outvec_to;
881 donelen += wbuf_retlen;
882 v += wbuf_retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700883 }
884
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200885 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
886 if (c->wbuf_len == c->wbuf_pagesize) {
887 ret = __jffs2_flush_wbuf(c, NOPAD);
888 if (ret)
889 goto outerr;
890 }
891
892 outvec_to += wbuf_retlen;
893 donelen += wbuf_retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700894 }
895
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200896 /*
897 * If there's a remainder in the wbuf and it's a non-GC write,
898 * remember that the wbuf affects this ino
899 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 *retlen = donelen;
901
Ferenc Havasie631ddb2005-09-07 09:35:26 +0100902 if (jffs2_sum_active()) {
903 int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
904 if (res)
905 return res;
906 }
907
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908 if (c->wbuf_len && ino)
909 jffs2_wbuf_dirties_inode(c, ino);
910
911 ret = 0;
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200912 up_write(&c->wbuf_sem);
913 return ret;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000914
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200915outfile:
916 /*
917 * At this point we have no problem, c->wbuf is empty. However
918 * refile nextblock to avoid writing again to same address.
919 */
920
921 spin_lock(&c->erase_completion_lock);
922
923 jeb = &c->blocks[outvec_to / c->sector_size];
924 jffs2_block_refile(c, jeb, REFILE_ANYWAY);
925
926 spin_unlock(&c->erase_completion_lock);
927
928outerr:
929 *retlen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930 up_write(&c->wbuf_sem);
931 return ret;
932}
933
934/*
935 * This is the entry for flash write.
936 * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev
937*/
David Woodhouse9bfeb692006-05-26 21:19:05 +0100938int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
939 size_t *retlen, const u_char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940{
941 struct kvec vecs[1];
942
Andrew Victor3be36672005-02-09 09:09:05 +0000943 if (!jffs2_is_writebuffered(c))
Ferenc Havasie631ddb2005-09-07 09:35:26 +0100944 return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945
946 vecs[0].iov_base = (unsigned char *) buf;
947 vecs[0].iov_len = len;
948 return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
949}
950
/*
   Handle readback from writebuffer and ECC failure return.

   Reads from the MTD device, then overlays any portion of the requested
   range that is still sitting in c->wbuf (written logically but not yet
   committed to flash), so callers always see the latest data.
*/
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t orbf = 0, owbf = 0, lwbf = 0;
	int ret;

	if (!jffs2_is_writebuffered(c))
		return mtd_read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = mtd_read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
		if (ret == -EBADMSG)
			pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
				len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer,
		 * maybe we are lucky and all data or parts are correct. We
		 * check the node. If data are corrupted node check will sort
		 * it out. We keep this block, it will fail on write or erase
		 * and the we mark it bad. Or should we do that now? But we
		 * should give him a chance. Maybe we had a system crash or
		 * power loss before the ecc write or a erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	/* The read range and the wbuf share an eraseblock: compute the
	   overlap and copy the buffered bytes over the flash data. */
	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf+orbf,c->wbuf+owbf,lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}
1013
/* Number of flash pages whose OOB areas are read in one scan batch. */
#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 8 bytes for OOB clean marker */
#define OOB_CM_SIZE 8

/* The cleanmarker node image written to / compared against the OOB area
   of freshly-erased NAND blocks. totlen matches OOB_CM_SIZE (8). */
static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = constant_cpu_to_je32(8)
};
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001025
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026/*
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001027 * Check, if the out of band area is empty. This function knows about the clean
1028 * marker and if it is present in OOB, treats the OOB as empty anyway.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029 */
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001030int jffs2_check_oob_empty(struct jffs2_sb_info *c,
1031 struct jffs2_eraseblock *jeb, int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032{
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001033 int i, ret;
1034 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001035 struct mtd_oob_ops ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036
Brian Norris0612b9d2011-08-30 18:45:40 -07001037 ops.mode = MTD_OPS_AUTO_OOB;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001038 ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001039 ops.oobbuf = c->oobbuf;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001040 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001041 ops.datbuf = NULL;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001042
Artem Bityutskiyfd2819b2011-12-23 18:27:05 +02001043 ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001044 if (ret || ops.oobretlen != ops.ooblen) {
Joe Perchesda320f02012-02-15 15:56:44 -08001045 pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1046 jeb->offset, ops.ooblen, ops.oobretlen, ret);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001047 if (!ret)
1048 ret = -EIO;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001049 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001051
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001052 for(i = 0; i < ops.ooblen; i++) {
1053 if (mode && i < cmlen)
1054 /* Yeah, we know about the cleanmarker */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055 continue;
1056
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001057 if (ops.oobbuf[i] != 0xFF) {
Joe Perches9c261b32012-02-15 15:56:43 -08001058 jffs2_dbg(2, "Found %02x at %x in OOB for "
1059 "%08x\n", ops.oobbuf[i], i, jeb->offset);
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001060 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001061 }
1062 }
1063
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001064 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001065}
1066
1067/*
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001068 * Check for a valid cleanmarker.
1069 * Returns: 0 if a valid cleanmarker was found
David Woodhouseef53cb02007-07-10 10:01:22 +01001070 * 1 if no cleanmarker was found
1071 * negative error code if an error occurred
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001072 */
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001073int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
1074 struct jffs2_eraseblock *jeb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075{
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001076 struct mtd_oob_ops ops;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001077 int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078
Brian Norris0612b9d2011-08-30 18:45:40 -07001079 ops.mode = MTD_OPS_AUTO_OOB;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001080 ops.ooblen = cmlen;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001081 ops.oobbuf = c->oobbuf;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001082 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001083 ops.datbuf = NULL;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001084
Artem Bityutskiyfd2819b2011-12-23 18:27:05 +02001085 ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001086 if (ret || ops.oobretlen != ops.ooblen) {
Joe Perchesda320f02012-02-15 15:56:44 -08001087 pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1088 jeb->offset, ops.ooblen, ops.oobretlen, ret);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001089 if (!ret)
1090 ret = -EIO;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001091 return ret;
1092 }
1093
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001094 return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095}
1096
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001097int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1098 struct jffs2_eraseblock *jeb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099{
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001100 int ret;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001101 struct mtd_oob_ops ops;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001102 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103
Brian Norris0612b9d2011-08-30 18:45:40 -07001104 ops.mode = MTD_OPS_AUTO_OOB;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001105 ops.ooblen = cmlen;
1106 ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1107 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001108 ops.datbuf = NULL;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001109
Artem Bityutskiya2cc5ba2011-12-23 18:29:55 +02001110 ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001111 if (ret || ops.oobretlen != ops.ooblen) {
Joe Perchesda320f02012-02-15 15:56:44 -08001112 pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1113 jeb->offset, ops.ooblen, ops.oobretlen, ret);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001114 if (!ret)
1115 ret = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116 return ret;
1117 }
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001118
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119 return 0;
1120}
1121
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001122/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123 * On NAND we try to mark this block bad. If the block was erased more
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001124 * than MAX_ERASE_FAILURES we mark it finally bad.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125 * Don't care about failures. This block remains on the erase-pending
1126 * or badblock list as long as nobody manipulates the flash with
1127 * a bootloader or something like that.
1128 */
1129
1130int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1131{
1132 int ret;
1133
1134 /* if the count is < max, we try to write the counter to the 2nd page oob area */
1135 if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1136 return 0;
1137
Joe Perchesda320f02012-02-15 15:56:44 -08001138 pr_warn("JFFS2: marking eraseblock at %08x as bad\n", bad_offset);
Artem Bityutskiy5942ddb2011-12-23 19:37:38 +02001139 ret = mtd_block_markbad(c->mtd, bad_offset);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001140
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141 if (ret) {
Joe Perches9c261b32012-02-15 15:56:43 -08001142 jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
1143 __func__, jeb->offset, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144 return ret;
1145 }
1146 return 1;
1147}
1148
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001149int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150{
Thomas Gleixner5bd34c02006-05-27 22:16:10 +02001151 struct nand_ecclayout *oinfo = c->mtd->ecclayout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 if (!c->mtd->oobsize)
1154 return 0;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001155
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156 /* Cleanmarker is out-of-band, so inline size zero */
1157 c->cleanmarker_size = 0;
1158
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001159 if (!oinfo || oinfo->oobavail == 0) {
Joe Perchesda320f02012-02-15 15:56:44 -08001160 pr_err("inconsistent device description\n");
Thomas Gleixner5bd34c02006-05-27 22:16:10 +02001161 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 }
Thomas Gleixner5bd34c02006-05-27 22:16:10 +02001163
Joe Perches9c261b32012-02-15 15:56:43 -08001164 jffs2_dbg(1, "JFFS2 using OOB on NAND\n");
Thomas Gleixner5bd34c02006-05-27 22:16:10 +02001165
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001166 c->oobavail = oinfo->oobavail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167
1168 /* Initialise write buffer */
1169 init_rwsem(&c->wbuf_sem);
Joern Engel28318772006-05-22 23:18:05 +02001170 c->wbuf_pagesize = c->mtd->writesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171 c->wbuf_ofs = 0xFFFFFFFF;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001172
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1174 if (!c->wbuf)
1175 return -ENOMEM;
1176
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001177 c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
1178 if (!c->oobbuf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179 kfree(c->wbuf);
1180 return -ENOMEM;
1181 }
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001182
David Woodhousea6bc4322007-07-11 14:23:54 +01001183#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1184 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1185 if (!c->wbuf_verify) {
1186 kfree(c->oobbuf);
1187 kfree(c->wbuf);
1188 return -ENOMEM;
1189 }
1190#endif
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001191 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192}
1193
1194void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1195{
David Woodhousea6bc4322007-07-11 14:23:54 +01001196#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1197 kfree(c->wbuf_verify);
1198#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 kfree(c->wbuf);
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001200 kfree(c->oobbuf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201}
1202
Andrew Victor8f15fd52005-02-09 09:17:45 +00001203int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1204 c->cleanmarker_size = 0; /* No cleanmarkers needed */
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001205
Andrew Victor8f15fd52005-02-09 09:17:45 +00001206 /* Initialize write buffer */
1207 init_rwsem(&c->wbuf_sem);
Andrew Victor8f15fd52005-02-09 09:17:45 +00001208
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001209
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001210 c->wbuf_pagesize = c->mtd->erasesize;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001211
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001212 /* Find a suitable c->sector_size
1213 * - Not too much sectors
1214 * - Sectors have to be at least 4 K + some bytes
1215 * - All known dataflashes have erase sizes of 528 or 1056
1216 * - we take at least 8 eraseblocks and want to have at least 8K size
1217 * - The concatenation should be a power of 2
1218 */
Andrew Victor8f15fd52005-02-09 09:17:45 +00001219
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001220 c->sector_size = 8 * c->mtd->erasesize;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001221
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001222 while (c->sector_size < 8192) {
1223 c->sector_size *= 2;
1224 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001225
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001226 /* It may be necessary to adjust the flash size */
1227 c->flash_size = c->mtd->size;
1228
1229 if ((c->flash_size % c->sector_size) != 0) {
1230 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
Joe Perchesda320f02012-02-15 15:56:44 -08001231 pr_warn("JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001232 };
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001233
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001234 c->wbuf_ofs = 0xFFFFFFFF;
Andrew Victor8f15fd52005-02-09 09:17:45 +00001235 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1236 if (!c->wbuf)
1237 return -ENOMEM;
1238
michaelcca15842008-04-18 13:44:17 -07001239#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1240 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1241 if (!c->wbuf_verify) {
1242 kfree(c->oobbuf);
1243 kfree(c->wbuf);
1244 return -ENOMEM;
1245 }
1246#endif
1247
Joe Perchesda320f02012-02-15 15:56:44 -08001248 pr_info("JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n",
1249 c->wbuf_pagesize, c->sector_size);
Andrew Victor8f15fd52005-02-09 09:17:45 +00001250
1251 return 0;
1252}
1253
1254void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
michaelcca15842008-04-18 13:44:17 -07001255#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1256 kfree(c->wbuf_verify);
1257#endif
Andrew Victor8f15fd52005-02-09 09:17:45 +00001258 kfree(c->wbuf);
1259}
Andrew Victor8f15fd52005-02-09 09:17:45 +00001260
Nicolas Pitre59da7212005-08-06 05:51:33 +01001261int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
Joern Engelc8b229d2006-05-22 23:18:12 +02001262 /* Cleanmarker currently occupies whole programming regions,
1263 * either one or 2 for 8Byte STMicro flashes. */
1264 c->cleanmarker_size = max(16u, c->mtd->writesize);
Nicolas Pitre59da7212005-08-06 05:51:33 +01001265
1266 /* Initialize write buffer */
1267 init_rwsem(&c->wbuf_sem);
Joern Engel28318772006-05-22 23:18:05 +02001268 c->wbuf_pagesize = c->mtd->writesize;
Nicolas Pitre59da7212005-08-06 05:51:33 +01001269 c->wbuf_ofs = 0xFFFFFFFF;
1270
1271 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1272 if (!c->wbuf)
1273 return -ENOMEM;
1274
Massimo Cirillobc8cec02009-08-27 10:44:09 +02001275#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1276 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1277 if (!c->wbuf_verify) {
1278 kfree(c->wbuf);
1279 return -ENOMEM;
1280 }
1281#endif
Nicolas Pitre59da7212005-08-06 05:51:33 +01001282 return 0;
1283}
1284
1285void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
Massimo Cirillobc8cec02009-08-27 10:44:09 +02001286#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1287 kfree(c->wbuf_verify);
1288#endif
Nicolas Pitre59da7212005-08-06 05:51:33 +01001289 kfree(c->wbuf);
1290}
Artem Bityutskiy0029da32006-10-04 19:15:21 +03001291
1292int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
1293 c->cleanmarker_size = 0;
1294
1295 if (c->mtd->writesize == 1)
1296 /* We do not need write-buffer */
1297 return 0;
1298
1299 init_rwsem(&c->wbuf_sem);
1300
1301 c->wbuf_pagesize = c->mtd->writesize;
1302 c->wbuf_ofs = 0xFFFFFFFF;
1303 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1304 if (!c->wbuf)
1305 return -ENOMEM;
1306
Joe Perchesda320f02012-02-15 15:56:44 -08001307 pr_info("JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n",
1308 c->wbuf_pagesize, c->sector_size);
Artem Bityutskiy0029da32006-10-04 19:15:21 +03001309
1310 return 0;
1311}
1312
1313void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
1314 kfree(c->wbuf);
1315}