blob: 74d9be19df3f1fff1d7defdc7824c90240a302f6 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
David Woodhousec00c3102007-04-25 14:16:47 +01004 * Copyright © 2001-2007 Red Hat, Inc.
5 * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de>
Linus Torvalds1da177e2005-04-16 15:20:36 -07006 *
7 * Created by David Woodhouse <dwmw2@infradead.org>
8 * Modified debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
9 *
10 * For licensing information, see the file 'LICENCE' in this directory.
11 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070012 */
13
Joe Perches5a528952012-02-15 15:56:45 -080014#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/kernel.h>
17#include <linux/slab.h>
18#include <linux/mtd/mtd.h>
19#include <linux/crc32.h>
20#include <linux/mtd/nand.h>
Tim Schmielau4e57b682005-10-30 15:03:48 -080021#include <linux/jiffies.h>
Al Viro914e2632006-10-18 13:55:46 -040022#include <linux/sched.h>
Tim Schmielau4e57b682005-10-30 15:03:48 -080023
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include "nodelist.h"
25
/* For testing write failures: define BREAKME / BREAKMEHEADER to
 * periodically fake write errors and exercise the recovery path. */
#undef BREAKME
#undef BREAKMEHEADER

#ifdef BREAKME
static unsigned char *brokenbuf;
#endif

/* Round an offset down to the start of its wbuf page, and get the
 * offset within that page. Both expect a local 'c' (jffs2_sb_info *)
 * to be in scope at the expansion site. */
#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )

/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES 	2
39
Linus Torvalds1da177e2005-04-16 15:20:36 -070040struct jffs2_inodirty {
41 uint32_t ino;
42 struct jffs2_inodirty *next;
43};
44
45static struct jffs2_inodirty inodirty_nomem;
46
47static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
48{
49 struct jffs2_inodirty *this = c->wbuf_inodes;
50
51 /* If a malloc failed, consider _everything_ dirty */
52 if (this == &inodirty_nomem)
53 return 1;
54
55 /* If ino == 0, _any_ non-GC writes mean 'yes' */
56 if (this && !ino)
57 return 1;
58
59 /* Look to see if the inode in question is pending in the wbuf */
60 while (this) {
61 if (this->ino == ino)
62 return 1;
63 this = this->next;
64 }
65 return 0;
66}
67
68static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
69{
70 struct jffs2_inodirty *this;
71
72 this = c->wbuf_inodes;
73
74 if (this != &inodirty_nomem) {
75 while (this) {
76 struct jffs2_inodirty *next = this->next;
77 kfree(this);
78 this = next;
79 }
80 }
81 c->wbuf_inodes = NULL;
82}
83
84static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
85{
86 struct jffs2_inodirty *new;
87
88 /* Mark the superblock dirty so that kupdated will flush... */
Joakim Tjernlund64a5c2e2010-05-19 17:13:19 +010089 jffs2_dirty_trigger(c);
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
91 if (jffs2_wbuf_pending_for_ino(c, ino))
92 return;
93
94 new = kmalloc(sizeof(*new), GFP_KERNEL);
95 if (!new) {
Joe Perches9c261b32012-02-15 15:56:43 -080096 jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070097 jffs2_clear_wbuf_ino_list(c);
98 c->wbuf_inodes = &inodirty_nomem;
99 return;
100 }
101 new->ino = ino;
102 new->next = c->wbuf_inodes;
103 c->wbuf_inodes = new;
104 return;
105}
106
107static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
108{
109 struct list_head *this, *next;
110 static int n;
111
112 if (list_empty(&c->erasable_pending_wbuf_list))
113 return;
114
115 list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
116 struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
117
Joe Perches9c261b32012-02-15 15:56:43 -0800118 jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
119 jeb->offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120 list_del(this);
121 if ((jiffies + (n++)) & 127) {
122 /* Most of the time, we just erase it immediately. Otherwise we
123 spend ages scanning it on mount, etc. */
Joe Perches9c261b32012-02-15 15:56:43 -0800124 jffs2_dbg(1, "...and adding to erase_pending_list\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125 list_add_tail(&jeb->list, &c->erase_pending_list);
126 c->nr_erasing_blocks++;
David Woodhouseae3b6ba2010-05-19 17:05:14 +0100127 jffs2_garbage_collect_trigger(c);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128 } else {
129 /* Sometimes, however, we leave it elsewhere so it doesn't get
130 immediately reused, and we spread the load a bit. */
Joe Perches9c261b32012-02-15 15:56:43 -0800131 jffs2_dbg(1, "...and adding to erasable_list\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700132 list_add_tail(&jeb->list, &c->erasable_list);
133 }
134 }
135}
136
Estelle Hammache7f716cf2005-01-24 21:24:18 +0000137#define REFILE_NOTEMPTY 0
138#define REFILE_ANYWAY 1
139
140static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141{
Joe Perches9c261b32012-02-15 15:56:43 -0800142 jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144 /* File the existing block on the bad_used_list.... */
145 if (c->nextblock == jeb)
146 c->nextblock = NULL;
147 else /* Not sure this should ever happen... need more coffee */
148 list_del(&jeb->list);
149 if (jeb->first_node) {
Joe Perches9c261b32012-02-15 15:56:43 -0800150 jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
151 jeb->offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700152 list_add(&jeb->list, &c->bad_used_list);
153 } else {
Estelle Hammache9b88f472005-01-28 18:53:05 +0000154 BUG_ON(allow_empty == REFILE_NOTEMPTY);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155 /* It has to have had some nodes or we couldn't be here */
Joe Perches9c261b32012-02-15 15:56:43 -0800156 jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
157 jeb->offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158 list_add(&jeb->list, &c->erase_pending_list);
159 c->nr_erasing_blocks++;
David Woodhouseae3b6ba2010-05-19 17:05:14 +0100160 jffs2_garbage_collect_trigger(c);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700161 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162
David Woodhouse9bfeb692006-05-26 21:19:05 +0100163 if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
164 uint32_t oldfree = jeb->free_size;
165
166 jffs2_link_node_ref(c, jeb,
167 (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
168 oldfree, NULL);
169 /* convert to wasted */
170 c->wasted_size += oldfree;
171 jeb->wasted_size += oldfree;
172 c->dirty_size -= oldfree;
173 jeb->dirty_size -= oldfree;
174 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +0100176 jffs2_dbg_dump_block_lists_nolock(c);
177 jffs2_dbg_acct_sanity_check_nolock(c,jeb);
178 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179}
180
David Woodhouse9bfeb692006-05-26 21:19:05 +0100181static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
182 struct jffs2_inode_info *f,
183 struct jffs2_raw_node_ref *raw,
184 union jffs2_node_union *node)
185{
186 struct jffs2_node_frag *frag;
187 struct jffs2_full_dirent *fd;
188
189 dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
190 node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
191
192 BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
193 je16_to_cpu(node->u.magic) != 0);
194
195 switch (je16_to_cpu(node->u.nodetype)) {
196 case JFFS2_NODETYPE_INODE:
David Woodhouseddc58bd2006-05-27 13:15:16 +0100197 if (f->metadata && f->metadata->raw == raw) {
198 dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
199 return &f->metadata->raw;
200 }
David Woodhouse9bfeb692006-05-26 21:19:05 +0100201 frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
202 BUG_ON(!frag);
203 /* Find a frag which refers to the full_dnode we want to modify */
204 while (!frag->node || frag->node->raw != raw) {
205 frag = frag_next(frag);
206 BUG_ON(!frag);
207 }
208 dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
209 return &frag->node->raw;
David Woodhouse9bfeb692006-05-26 21:19:05 +0100210
211 case JFFS2_NODETYPE_DIRENT:
212 for (fd = f->dents; fd; fd = fd->next) {
213 if (fd->raw == raw) {
214 dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
215 return &fd->raw;
216 }
217 }
218 BUG();
David Woodhouseddc58bd2006-05-27 13:15:16 +0100219
David Woodhouse9bfeb692006-05-26 21:19:05 +0100220 default:
221 dbg_noderef("Don't care about replacing raw for nodetype %x\n",
222 je16_to_cpu(node->u.nodetype));
223 break;
224 }
225 return NULL;
226}
227
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
/* Read back the wbuf page just written at @ofs and compare it with @buf.
 * Returns 0 when the flash contents match, a negative errno otherwise;
 * on mismatch both the written and read-back pages are hex-dumped. */
static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
			      uint32_t ofs)
{
	int ret;
	size_t retlen;
	char *eccstr;

	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
	/* -EUCLEAN/-EBADMSG mean the data came back (possibly corrected);
	   keep going and let the memcmp decide. */
	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
		pr_warn("%s(): Read back of page at %08x failed: %d\n",
			__func__, c->wbuf_ofs, ret);
		return ret;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
			__func__, ofs, retlen, c->wbuf_pagesize);
		return -EIO;
	}

	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
		return 0;

	/* Mismatch: report what the ECC layer said about the read. */
	if (ret == -EUCLEAN)
		eccstr = "corrected";
	else if (ret == -EBADMSG)
		eccstr = "correction failed";
	else
		eccstr = "OK or unused";

	pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
		eccstr, c->wbuf_ofs);
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf, c->wbuf_pagesize, 0);

	pr_warn("Read back:\n");
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf_verify, c->wbuf_pagesize, 0);

	return -EIO;
}
#else
#define jffs2_verify_write(c,b,o) (0)
#endif
270
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271/* Recover from failure to write wbuf. Recover the nodes up to the
272 * wbuf, not the one which we were starting to try to write. */
273
274static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
275{
276 struct jffs2_eraseblock *jeb, *new_jeb;
David Woodhouse9bfeb692006-05-26 21:19:05 +0100277 struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278 size_t retlen;
279 int ret;
David Woodhouse9bfeb692006-05-26 21:19:05 +0100280 int nr_refile = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281 unsigned char *buf;
282 uint32_t start, end, ofs, len;
283
David Woodhouse046b8b92006-05-25 01:50:35 +0100284 jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
285
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286 spin_lock(&c->erase_completion_lock);
Vitaly Wool180bfb32007-03-06 17:01:04 +0300287 if (c->wbuf_ofs % c->mtd->erasesize)
288 jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
289 else
290 jffs2_block_refile(c, jeb, REFILE_ANYWAY);
David Woodhouse9bfeb692006-05-26 21:19:05 +0100291 spin_unlock(&c->erase_completion_lock);
292
293 BUG_ON(!ref_obsolete(jeb->last_node));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294
295 /* Find the first node to be recovered, by skipping over every
296 node which ends before the wbuf starts, or which is obsolete. */
David Woodhouse9bfeb692006-05-26 21:19:05 +0100297 for (next = raw = jeb->first_node; next; raw = next) {
298 next = ref_next(raw);
299
300 if (ref_obsolete(raw) ||
301 (next && ref_offset(next) <= c->wbuf_ofs)) {
302 dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
303 ref_offset(raw), ref_flags(raw),
304 (ref_offset(raw) + ref_totlen(c, jeb, raw)),
305 c->wbuf_ofs);
306 continue;
307 }
308 dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
309 ref_offset(raw), ref_flags(raw),
310 (ref_offset(raw) + ref_totlen(c, jeb, raw)));
311
312 first_raw = raw;
313 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700314 }
315
David Woodhouse9bfeb692006-05-26 21:19:05 +0100316 if (!first_raw) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317 /* All nodes were obsolete. Nothing to recover. */
Joe Perches9c261b32012-02-15 15:56:43 -0800318 jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
David Woodhouse9bfeb692006-05-26 21:19:05 +0100319 c->wbuf_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320 return;
321 }
322
David Woodhouse9bfeb692006-05-26 21:19:05 +0100323 start = ref_offset(first_raw);
324 end = ref_offset(jeb->last_node);
325 nr_refile = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326
David Woodhouse9bfeb692006-05-26 21:19:05 +0100327 /* Count the number of refs which need to be copied */
328 while ((raw = ref_next(raw)) != jeb->last_node)
329 nr_refile++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330
David Woodhouse9bfeb692006-05-26 21:19:05 +0100331 dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
332 start, end, end - start, nr_refile);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700333
334 buf = NULL;
335 if (start < c->wbuf_ofs) {
336 /* First affected node was already partially written.
337 * Attempt to reread the old data into our buffer. */
338
339 buf = kmalloc(end - start, GFP_KERNEL);
340 if (!buf) {
Joe Perchesda320f02012-02-15 15:56:44 -0800341 pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342
343 goto read_failed;
344 }
345
346 /* Do the read... */
Artem Bityutskiy329ad392011-12-23 17:30:16 +0200347 ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
348 buf);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000349
Thomas Gleixner9a1fcdf2006-05-29 14:56:39 +0200350 /* ECC recovered ? */
351 if ((ret == -EUCLEAN || ret == -EBADMSG) &&
352 (retlen == c->wbuf_ofs - start))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353 ret = 0;
Thomas Gleixner9a1fcdf2006-05-29 14:56:39 +0200354
Linus Torvalds1da177e2005-04-16 15:20:36 -0700355 if (ret || retlen != c->wbuf_ofs - start) {
Joe Perchesda320f02012-02-15 15:56:44 -0800356 pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357
358 kfree(buf);
359 buf = NULL;
360 read_failed:
David Woodhouse9bfeb692006-05-26 21:19:05 +0100361 first_raw = ref_next(first_raw);
362 nr_refile--;
363 while (first_raw && ref_obsolete(first_raw)) {
364 first_raw = ref_next(first_raw);
365 nr_refile--;
366 }
367
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368 /* If this was the only node to be recovered, give up */
David Woodhouse9bfeb692006-05-26 21:19:05 +0100369 if (!first_raw) {
370 c->wbuf_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371 return;
David Woodhouse9bfeb692006-05-26 21:19:05 +0100372 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
374 /* It wasn't. Go on and try to recover nodes complete in the wbuf */
David Woodhouse9bfeb692006-05-26 21:19:05 +0100375 start = ref_offset(first_raw);
376 dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
377 start, end, end - start, nr_refile);
378
Linus Torvalds1da177e2005-04-16 15:20:36 -0700379 } else {
380 /* Read succeeded. Copy the remaining data from the wbuf */
381 memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
382 }
383 }
384 /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
385 Either 'buf' contains the data, or we find it in the wbuf */
386
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387 /* ... and get an allocation of space from a shiny new block instead */
David Woodhouse9fe48542006-05-23 00:38:06 +0100388 ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389 if (ret) {
Joe Perchesda320f02012-02-15 15:56:44 -0800390 pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
Estelle Hammache9b88f472005-01-28 18:53:05 +0000391 kfree(buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392 return;
393 }
David Woodhouse9bfeb692006-05-26 21:19:05 +0100394
Adrian Hunter7f762ab2007-04-04 13:47:53 +0300395 /* The summary is not recovered, so it must be disabled for this erase block */
396 jffs2_sum_disable_collecting(c->summary);
397
David Woodhouse9bfeb692006-05-26 21:19:05 +0100398 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
399 if (ret) {
Joe Perchesda320f02012-02-15 15:56:44 -0800400 pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
David Woodhouse9bfeb692006-05-26 21:19:05 +0100401 kfree(buf);
402 return;
403 }
404
David Woodhouse9fe48542006-05-23 00:38:06 +0100405 ofs = write_ofs(c);
406
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 if (end-start >= c->wbuf_pagesize) {
Estelle Hammache7f716cf2005-01-24 21:24:18 +0000408 /* Need to do another write immediately, but it's possible
Estelle Hammache9b88f472005-01-28 18:53:05 +0000409 that this is just because the wbuf itself is completely
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000410 full, and there's nothing earlier read back from the
411 flash. Hence 'buf' isn't necessarily what we're writing
Estelle Hammache9b88f472005-01-28 18:53:05 +0000412 from. */
Estelle Hammache7f716cf2005-01-24 21:24:18 +0000413 unsigned char *rewrite_buf = buf?:c->wbuf;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414 uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
415
Joe Perches9c261b32012-02-15 15:56:43 -0800416 jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
417 towrite, ofs);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000418
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419#ifdef BREAKMEHEADER
420 static int breakme;
421 if (breakme++ == 20) {
Joe Perchesda320f02012-02-15 15:56:44 -0800422 pr_notice("Faking write error at 0x%08x\n", ofs);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 breakme = 0;
Artem Bityutskiyeda95cb2011-12-23 17:35:41 +0200424 mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700425 ret = -EIO;
426 } else
427#endif
Artem Bityutskiyeda95cb2011-12-23 17:35:41 +0200428 ret = mtd_write(c->mtd, ofs, towrite, &retlen,
429 rewrite_buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700430
David Woodhousea6bc4322007-07-11 14:23:54 +0100431 if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700432 /* Argh. We tried. Really we did. */
Joe Perchesda320f02012-02-15 15:56:44 -0800433 pr_crit("Recovery of wbuf failed due to a second write error\n");
Estelle Hammache9b88f472005-01-28 18:53:05 +0000434 kfree(buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700435
David Woodhouse2f785402006-05-24 02:04:45 +0100436 if (retlen)
David Woodhouse9bfeb692006-05-26 21:19:05 +0100437 jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700438
Linus Torvalds1da177e2005-04-16 15:20:36 -0700439 return;
440 }
Joe Perchesda320f02012-02-15 15:56:44 -0800441 pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700442
443 c->wbuf_len = (end - start) - towrite;
444 c->wbuf_ofs = ofs + towrite;
Estelle Hammache7f716cf2005-01-24 21:24:18 +0000445 memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700446 /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700447 } else {
448 /* OK, now we're left with the dregs in whichever buffer we're using */
449 if (buf) {
450 memcpy(c->wbuf, buf, end-start);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700451 } else {
452 memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
453 }
454 c->wbuf_ofs = ofs;
455 c->wbuf_len = end - start;
456 }
457
458 /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
459 new_jeb = &c->blocks[ofs / c->sector_size];
460
461 spin_lock(&c->erase_completion_lock);
David Woodhouse9bfeb692006-05-26 21:19:05 +0100462 for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
463 uint32_t rawlen = ref_totlen(c, jeb, raw);
464 struct jffs2_inode_cache *ic;
465 struct jffs2_raw_node_ref *new_ref;
466 struct jffs2_raw_node_ref **adjust_ref = NULL;
467 struct jffs2_inode_info *f = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700468
Joe Perches9c261b32012-02-15 15:56:43 -0800469 jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
470 rawlen, ref_offset(raw), ref_flags(raw), ofs);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471
David Woodhouse9bfeb692006-05-26 21:19:05 +0100472 ic = jffs2_raw_ref_to_ic(raw);
473
474 /* Ick. This XATTR mess should be fixed shortly... */
475 if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
476 struct jffs2_xattr_datum *xd = (void *)ic;
477 BUG_ON(xd->node != raw);
478 adjust_ref = &xd->node;
479 raw->next_in_ino = NULL;
480 ic = NULL;
481 } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
482 struct jffs2_xattr_datum *xr = (void *)ic;
483 BUG_ON(xr->node != raw);
484 adjust_ref = &xr->node;
485 raw->next_in_ino = NULL;
486 ic = NULL;
487 } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
488 struct jffs2_raw_node_ref **p = &ic->nodes;
489
490 /* Remove the old node from the per-inode list */
491 while (*p && *p != (void *)ic) {
492 if (*p == raw) {
493 (*p) = (raw->next_in_ino);
494 raw->next_in_ino = NULL;
495 break;
496 }
497 p = &((*p)->next_in_ino);
498 }
499
500 if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
501 /* If it's an in-core inode, then we have to adjust any
502 full_dirent or full_dnode structure to point to the
503 new version instead of the old */
David Woodhouse27c72b02008-05-01 18:47:17 +0100504 f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
David Woodhouse9bfeb692006-05-26 21:19:05 +0100505 if (IS_ERR(f)) {
506 /* Should never happen; it _must_ be present */
507 JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
508 ic->ino, PTR_ERR(f));
509 BUG();
510 }
511 /* We don't lock f->sem. There's a number of ways we could
512 end up in here with it already being locked, and nobody's
513 going to modify it on us anyway because we hold the
514 alloc_sem. We're only changing one ->raw pointer too,
515 which we can get away with without upsetting readers. */
516 adjust_ref = jffs2_incore_replace_raw(c, f, raw,
517 (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
518 } else if (unlikely(ic->state != INO_STATE_PRESENT &&
519 ic->state != INO_STATE_CHECKEDABSENT &&
520 ic->state != INO_STATE_GC)) {
521 JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
522 BUG();
523 }
524 }
525
526 new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
527
528 if (adjust_ref) {
529 BUG_ON(*adjust_ref != raw);
530 *adjust_ref = new_ref;
531 }
532 if (f)
533 jffs2_gc_release_inode(c, f);
534
535 if (!ref_obsolete(raw)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700536 jeb->dirty_size += rawlen;
537 jeb->used_size -= rawlen;
538 c->dirty_size += rawlen;
David Woodhouse9bfeb692006-05-26 21:19:05 +0100539 c->used_size -= rawlen;
540 raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
541 BUG_ON(raw->next_in_ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700543 ofs += rawlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700544 }
545
David Woodhouse9bfeb692006-05-26 21:19:05 +0100546 kfree(buf);
547
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 /* Fix up the original jeb now it's on the bad_list */
David Woodhouse9bfeb692006-05-26 21:19:05 +0100549 if (first_raw == jeb->first_node) {
Joe Perches9c261b32012-02-15 15:56:43 -0800550 jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
551 jeb->offset);
Akinobu Mitaf1166292006-06-26 00:24:46 -0700552 list_move(&jeb->list, &c->erase_pending_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553 c->nr_erasing_blocks++;
David Woodhouseae3b6ba2010-05-19 17:05:14 +0100554 jffs2_garbage_collect_trigger(c);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +0100557 jffs2_dbg_acct_sanity_check_nolock(c, jeb);
David Woodhouse9bfeb692006-05-26 21:19:05 +0100558 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +0100560 jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
David Woodhouse9bfeb692006-05-26 21:19:05 +0100561 jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562
563 spin_unlock(&c->erase_completion_lock);
564
Joe Perches9c261b32012-02-15 15:56:43 -0800565 jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
566 c->wbuf_ofs, c->wbuf_len);
David Woodhouse9bfeb692006-05-26 21:19:05 +0100567
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568}
569
570/* Meaning of pad argument:
571 0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
572 1: Pad, do not adjust nextblock free_size
573 2: Pad, adjust nextblock free_size
574*/
575#define NOPAD 0
576#define PAD_NOACCOUNT 1
577#define PAD_ACCOUNTING 2
578
579static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
580{
David Woodhouse9bfeb692006-05-26 21:19:05 +0100581 struct jffs2_eraseblock *wbuf_jeb;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700582 int ret;
583 size_t retlen;
584
Andrew Victor3be36672005-02-09 09:09:05 +0000585 /* Nothing to do if not write-buffering the flash. In particular, we shouldn't
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586 del_timer() the timer we never initialised. */
Andrew Victor3be36672005-02-09 09:09:05 +0000587 if (!jffs2_is_writebuffered(c))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700588 return 0;
589
Alexey Khoroshilov51b11e32011-06-28 00:21:30 +0400590 if (!mutex_is_locked(&c->alloc_sem)) {
Joe Perchesda320f02012-02-15 15:56:44 -0800591 pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 BUG();
593 }
594
Andrew Victor3be36672005-02-09 09:09:05 +0000595 if (!c->wbuf_len) /* already checked c->wbuf above */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596 return 0;
597
David Woodhouse9bfeb692006-05-26 21:19:05 +0100598 wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
599 if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
David Woodhouse2f785402006-05-24 02:04:45 +0100600 return -ENOMEM;
601
Linus Torvalds1da177e2005-04-16 15:20:36 -0700602 /* claim remaining space on the page
603 this happens, if we have a change to a new block,
604 or if fsync forces us to flush the writebuffer.
605 if we have a switch to next page, we will not have
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000606 enough remaining space for this.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607 */
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +0100608 if (pad ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609 c->wbuf_len = PAD(c->wbuf_len);
610
611 /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
612 with 8 byte page size */
613 memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000614
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
616 struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
617 padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
618 padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
619 padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
620 padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
621 }
622 }
623 /* else jffs2_flash_writev has actually filled in the rest of the
624 buffer for us, and will deal with the node refs etc. later. */
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000625
Linus Torvalds1da177e2005-04-16 15:20:36 -0700626#ifdef BREAKME
627 static int breakme;
628 if (breakme++ == 20) {
Joe Perchesda320f02012-02-15 15:56:44 -0800629 pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700630 breakme = 0;
Artem Bityutskiyeda95cb2011-12-23 17:35:41 +0200631 mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
632 brokenbuf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700633 ret = -EIO;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000634 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635#endif
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000636
Artem Bityutskiyeda95cb2011-12-23 17:35:41 +0200637 ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
638 &retlen, c->wbuf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639
David Woodhousea6bc4322007-07-11 14:23:54 +0100640 if (ret) {
Joe Perchesda320f02012-02-15 15:56:44 -0800641 pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
David Woodhousea6bc4322007-07-11 14:23:54 +0100642 goto wfail;
643 } else if (retlen != c->wbuf_pagesize) {
Joe Perchesda320f02012-02-15 15:56:44 -0800644 pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
645 retlen, c->wbuf_pagesize);
David Woodhousea6bc4322007-07-11 14:23:54 +0100646 ret = -EIO;
647 goto wfail;
648 } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
649 wfail:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700650 jffs2_wbuf_recover(c);
651
652 return ret;
653 }
654
Linus Torvalds1da177e2005-04-16 15:20:36 -0700655 /* Adjust free size of the block if we padded. */
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +0100656 if (pad) {
David Woodhouse0bcc0992006-05-21 13:00:54 +0100657 uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658
Joe Perches9c261b32012-02-15 15:56:43 -0800659 jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
660 (wbuf_jeb == c->nextblock) ? "next" : "",
661 wbuf_jeb->offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700662
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000663 /* wbuf_pagesize - wbuf_len is the amount of space that's to be
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664 padded. If there is less free space in the block than that,
665 something screwed up */
David Woodhouse9bfeb692006-05-26 21:19:05 +0100666 if (wbuf_jeb->free_size < waste) {
Joe Perchesda320f02012-02-15 15:56:44 -0800667 pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
668 c->wbuf_ofs, c->wbuf_len, waste);
669 pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
670 wbuf_jeb->offset, wbuf_jeb->free_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 BUG();
672 }
David Woodhouse0bcc0992006-05-21 13:00:54 +0100673
674 spin_lock(&c->erase_completion_lock);
675
David Woodhouse9bfeb692006-05-26 21:19:05 +0100676 jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
David Woodhouse0bcc0992006-05-21 13:00:54 +0100677 /* FIXME: that made it count as dirty. Convert to wasted */
David Woodhouse9bfeb692006-05-26 21:19:05 +0100678 wbuf_jeb->dirty_size -= waste;
David Woodhouse0bcc0992006-05-21 13:00:54 +0100679 c->dirty_size -= waste;
David Woodhouse9bfeb692006-05-26 21:19:05 +0100680 wbuf_jeb->wasted_size += waste;
David Woodhouse0bcc0992006-05-21 13:00:54 +0100681 c->wasted_size += waste;
682 } else
683 spin_lock(&c->erase_completion_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684
685 /* Stick any now-obsoleted blocks on the erase_pending_list */
686 jffs2_refile_wbuf_blocks(c);
687 jffs2_clear_wbuf_ino_list(c);
688 spin_unlock(&c->erase_completion_lock);
689
690 memset(c->wbuf,0xff,c->wbuf_pagesize);
691 /* adjust write buffer offset, else we get a non contiguous write bug */
Alexander Belyakov5bf17232008-10-17 19:19:13 +0400692 c->wbuf_ofs += c->wbuf_pagesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693 c->wbuf_len = 0;
694 return 0;
695}
696
/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding.
   Returns 0 on success (or nothing to do), or the error from the
   final padded flush attempt. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);

	/* No write buffer allocated: nothing to flush */
	if (!c->wbuf)
		return 0;

	mutex_lock(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
		mutex_unlock(&c->alloc_sem);
		return 0;
	}

	/* Snapshot the wbuf position; the GC loop below terminates once
	   a GC pass has moved the wbuf to a different offset (i.e. the
	   pending data actually got written out). */
	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
			  __func__);
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		/* Drop alloc_sem across the GC pass — NOTE(review):
		   presumably jffs2_garbage_collect_pass() takes it
		   itself; it is re-taken below before looping. */
		mutex_unlock(&c->alloc_sem);

		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			mutex_lock(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		mutex_lock(&c->alloc_sem);
	}

	jffs2_dbg(1, "%s(): ends...\n", __func__);

	mutex_unlock(&c->alloc_sem);
	return ret;
}
761
762/* Pad write-buffer to end and write it, wasting space. */
763int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
764{
765 int ret;
766
David Woodhouse8aee6ac2005-02-02 22:12:08 +0000767 if (!c->wbuf)
768 return 0;
769
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770 down_write(&c->wbuf_sem);
771 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
Estelle Hammache7f716cf2005-01-24 21:24:18 +0000772 /* retry - maybe wbuf recover left some data in wbuf. */
773 if (ret)
774 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775 up_write(&c->wbuf_sem);
776
777 return ret;
778}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200780static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
781 size_t len)
782{
783 if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
784 return 0;
785
786 if (len > (c->wbuf_pagesize - c->wbuf_len))
787 len = c->wbuf_pagesize - c->wbuf_len;
788 memcpy(c->wbuf + c->wbuf_len, buf, len);
789 c->wbuf_len += (uint32_t) len;
790 return len;
791}
792
793int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
794 unsigned long count, loff_t to, size_t *retlen,
795 uint32_t ino)
796{
797 struct jffs2_eraseblock *jeb;
798 size_t wbuf_retlen, donelen = 0;
799 uint32_t outvec_to = to;
800 int ret, invec;
801
802 /* If not writebuffered flash, don't bother */
Andrew Victor3be36672005-02-09 09:09:05 +0000803 if (!jffs2_is_writebuffered(c))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000805
Linus Torvalds1da177e2005-04-16 15:20:36 -0700806 down_write(&c->wbuf_sem);
807
808 /* If wbuf_ofs is not initialized, set it to target address */
809 if (c->wbuf_ofs == 0xFFFFFFFF) {
810 c->wbuf_ofs = PAGE_DIV(to);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000811 c->wbuf_len = PAGE_MOD(to);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812 memset(c->wbuf,0xff,c->wbuf_pagesize);
813 }
814
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200815 /*
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200816 * Sanity checks on target address. It's permitted to write
817 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
818 * write at the beginning of a new erase block. Anything else,
819 * and you die. New block starts at xxx000c (0-b = block
820 * header)
821 */
Andrew Victor3be36672005-02-09 09:09:05 +0000822 if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700823 /* It's a write to a new block */
824 if (c->wbuf_len) {
Joe Perches9c261b32012-02-15 15:56:43 -0800825 jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
826 __func__, (unsigned long)to, c->wbuf_ofs);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700827 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200828 if (ret)
829 goto outerr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 }
831 /* set pointer to new block */
832 c->wbuf_ofs = PAGE_DIV(to);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000833 c->wbuf_len = PAGE_MOD(to);
834 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700835
836 if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
837 /* We're not writing immediately after the writebuffer. Bad. */
Joe Perchesda320f02012-02-15 15:56:44 -0800838 pr_crit("%s(): Non-contiguous write to %08lx\n",
839 __func__, (unsigned long)to);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700840 if (c->wbuf_len)
Joe Perchesda320f02012-02-15 15:56:44 -0800841 pr_crit("wbuf was previously %08x-%08x\n",
842 c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843 BUG();
844 }
845
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200846 /* adjust alignment offset */
847 if (c->wbuf_len != PAGE_MOD(to)) {
848 c->wbuf_len = PAGE_MOD(to);
849 /* take care of alignment to next page */
850 if (!c->wbuf_len) {
851 c->wbuf_len = c->wbuf_pagesize;
852 ret = __jffs2_flush_wbuf(c, NOPAD);
853 if (ret)
854 goto outerr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700855 }
856 }
857
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200858 for (invec = 0; invec < count; invec++) {
859 int vlen = invecs[invec].iov_len;
860 uint8_t *v = invecs[invec].iov_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200862 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200864 if (c->wbuf_len == c->wbuf_pagesize) {
865 ret = __jffs2_flush_wbuf(c, NOPAD);
866 if (ret)
867 goto outerr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700868 }
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200869 vlen -= wbuf_retlen;
870 outvec_to += wbuf_retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871 donelen += wbuf_retlen;
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200872 v += wbuf_retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700873
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200874 if (vlen >= c->wbuf_pagesize) {
Artem Bityutskiyeda95cb2011-12-23 17:35:41 +0200875 ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
876 &wbuf_retlen, v);
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200877 if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
878 goto outfile;
879
880 vlen -= wbuf_retlen;
881 outvec_to += wbuf_retlen;
882 c->wbuf_ofs = outvec_to;
883 donelen += wbuf_retlen;
884 v += wbuf_retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700885 }
886
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200887 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
888 if (c->wbuf_len == c->wbuf_pagesize) {
889 ret = __jffs2_flush_wbuf(c, NOPAD);
890 if (ret)
891 goto outerr;
892 }
893
894 outvec_to += wbuf_retlen;
895 donelen += wbuf_retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896 }
897
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200898 /*
899 * If there's a remainder in the wbuf and it's a non-GC write,
900 * remember that the wbuf affects this ino
901 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 *retlen = donelen;
903
Ferenc Havasie631ddb2005-09-07 09:35:26 +0100904 if (jffs2_sum_active()) {
905 int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
906 if (res)
907 return res;
908 }
909
Linus Torvalds1da177e2005-04-16 15:20:36 -0700910 if (c->wbuf_len && ino)
911 jffs2_wbuf_dirties_inode(c, ino);
912
913 ret = 0;
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200914 up_write(&c->wbuf_sem);
915 return ret;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000916
Thomas Gleixnerdcb09322006-05-23 11:49:14 +0200917outfile:
918 /*
919 * At this point we have no problem, c->wbuf is empty. However
920 * refile nextblock to avoid writing again to same address.
921 */
922
923 spin_lock(&c->erase_completion_lock);
924
925 jeb = &c->blocks[outvec_to / c->sector_size];
926 jffs2_block_refile(c, jeb, REFILE_ANYWAY);
927
928 spin_unlock(&c->erase_completion_lock);
929
930outerr:
931 *retlen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700932 up_write(&c->wbuf_sem);
933 return ret;
934}
935
936/*
937 * This is the entry for flash write.
938 * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev
939*/
David Woodhouse9bfeb692006-05-26 21:19:05 +0100940int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
941 size_t *retlen, const u_char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700942{
943 struct kvec vecs[1];
944
Andrew Victor3be36672005-02-09 09:09:05 +0000945 if (!jffs2_is_writebuffered(c))
Ferenc Havasie631ddb2005-09-07 09:35:26 +0100946 return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947
948 vecs[0].iov_base = (unsigned char *) buf;
949 vecs[0].iov_len = len;
950 return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
951}
952
/*
   Handle readback from writebuffer and ECC failure return.
   Reads from flash, then overlays any portion of the requested range
   that is still sitting (unwritten) in the write-buffer.  A correctable
   or uncorrectable ECC indication with a full-length read is converted
   to success so node CRC checks can decide the data's fate.
*/
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t	orbf = 0, owbf = 0, lwbf = 0;
	int	ret;

	/* No write-buffering: plain MTD read */
	if (!jffs2_is_writebuffered(c))
		return mtd_read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = mtd_read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
		if (ret == -EBADMSG)
			pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
				len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer,
		 * maybe we are lucky and all data or parts are correct. We
		 * check the node. If data are corrupted node check will sort
		 * it out. We keep this block, it will fail on write or erase
		 * and the we mark it bad. Or should we do that now? But we
		 * should give him a chance. Maybe we had a system crash or
		 * power loss before the ecc write or a erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	/* Overlay the part of the read range covered by the wbuf:
	   orbf = start offset within the caller's buffer,
	   owbf = start offset within the wbuf,
	   lwbf = number of bytes to copy. */
	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf+orbf,c->wbuf+owbf,lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}
1015
/* How many pages' worth of available OOB bytes are scanned at once
   by jffs2_check_oob_empty() (ooblen = NR_OOB_SCAN_PAGES * oobavail) */
#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 8 bytes for OOB clean marker */
#define OOB_CM_SIZE 8

/* Cleanmarker node image written to (and compared against) the OOB
   area of freshly-erased eraseblocks */
static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = constant_cpu_to_je32(8)
};
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001027
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028/*
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001029 * Check, if the out of band area is empty. This function knows about the clean
1030 * marker and if it is present in OOB, treats the OOB as empty anyway.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001031 */
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001032int jffs2_check_oob_empty(struct jffs2_sb_info *c,
1033 struct jffs2_eraseblock *jeb, int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034{
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001035 int i, ret;
1036 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001037 struct mtd_oob_ops ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038
Brian Norris0612b9d2011-08-30 18:45:40 -07001039 ops.mode = MTD_OPS_AUTO_OOB;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001040 ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001041 ops.oobbuf = c->oobbuf;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001042 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001043 ops.datbuf = NULL;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001044
Artem Bityutskiyfd2819b2011-12-23 18:27:05 +02001045 ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001046 if (ret || ops.oobretlen != ops.ooblen) {
Joe Perchesda320f02012-02-15 15:56:44 -08001047 pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1048 jeb->offset, ops.ooblen, ops.oobretlen, ret);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001049 if (!ret)
1050 ret = -EIO;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001051 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001052 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001053
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001054 for(i = 0; i < ops.ooblen; i++) {
1055 if (mode && i < cmlen)
1056 /* Yeah, we know about the cleanmarker */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057 continue;
1058
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001059 if (ops.oobbuf[i] != 0xFF) {
Joe Perches9c261b32012-02-15 15:56:43 -08001060 jffs2_dbg(2, "Found %02x at %x in OOB for "
1061 "%08x\n", ops.oobbuf[i], i, jeb->offset);
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001062 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063 }
1064 }
1065
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001066 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067}
1068
1069/*
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001070 * Check for a valid cleanmarker.
1071 * Returns: 0 if a valid cleanmarker was found
David Woodhouseef53cb02007-07-10 10:01:22 +01001072 * 1 if no cleanmarker was found
1073 * negative error code if an error occurred
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001074 */
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001075int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
1076 struct jffs2_eraseblock *jeb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077{
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001078 struct mtd_oob_ops ops;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001079 int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080
Brian Norris0612b9d2011-08-30 18:45:40 -07001081 ops.mode = MTD_OPS_AUTO_OOB;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001082 ops.ooblen = cmlen;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001083 ops.oobbuf = c->oobbuf;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001084 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001085 ops.datbuf = NULL;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001086
Artem Bityutskiyfd2819b2011-12-23 18:27:05 +02001087 ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001088 if (ret || ops.oobretlen != ops.ooblen) {
Joe Perchesda320f02012-02-15 15:56:44 -08001089 pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1090 jeb->offset, ops.ooblen, ops.oobretlen, ret);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001091 if (!ret)
1092 ret = -EIO;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001093 return ret;
1094 }
1095
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001096 return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001097}
1098
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001099int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1100 struct jffs2_eraseblock *jeb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101{
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001102 int ret;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001103 struct mtd_oob_ops ops;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001104 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105
Brian Norris0612b9d2011-08-30 18:45:40 -07001106 ops.mode = MTD_OPS_AUTO_OOB;
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001107 ops.ooblen = cmlen;
1108 ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1109 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001110 ops.datbuf = NULL;
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001111
Artem Bityutskiya2cc5ba2011-12-23 18:29:55 +02001112 ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001113 if (ret || ops.oobretlen != ops.ooblen) {
Joe Perchesda320f02012-02-15 15:56:44 -08001114 pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1115 jeb->offset, ops.ooblen, ops.oobretlen, ret);
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001116 if (!ret)
1117 ret = -EIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118 return ret;
1119 }
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001120
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121 return 0;
1122}
1123
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001124/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125 * On NAND we try to mark this block bad. If the block was erased more
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001126 * than MAX_ERASE_FAILURES we mark it finally bad.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127 * Don't care about failures. This block remains on the erase-pending
1128 * or badblock list as long as nobody manipulates the flash with
1129 * a bootloader or something like that.
1130 */
1131
1132int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1133{
1134 int ret;
1135
1136 /* if the count is < max, we try to write the counter to the 2nd page oob area */
1137 if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1138 return 0;
1139
Joe Perches5a528952012-02-15 15:56:45 -08001140 pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
Artem Bityutskiy5942ddb2011-12-23 19:37:38 +02001141 ret = mtd_block_markbad(c->mtd, bad_offset);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001142
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 if (ret) {
Joe Perches9c261b32012-02-15 15:56:43 -08001144 jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
1145 __func__, jeb->offset, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146 return ret;
1147 }
1148 return 1;
1149}
1150
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001151int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152{
Thomas Gleixner5bd34c02006-05-27 22:16:10 +02001153 struct nand_ecclayout *oinfo = c->mtd->ecclayout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155 if (!c->mtd->oobsize)
1156 return 0;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001157
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158 /* Cleanmarker is out-of-band, so inline size zero */
1159 c->cleanmarker_size = 0;
1160
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001161 if (!oinfo || oinfo->oobavail == 0) {
Joe Perchesda320f02012-02-15 15:56:44 -08001162 pr_err("inconsistent device description\n");
Thomas Gleixner5bd34c02006-05-27 22:16:10 +02001163 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 }
Thomas Gleixner5bd34c02006-05-27 22:16:10 +02001165
Joe Perches5a528952012-02-15 15:56:45 -08001166 jffs2_dbg(1, "using OOB on NAND\n");
Thomas Gleixner5bd34c02006-05-27 22:16:10 +02001167
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001168 c->oobavail = oinfo->oobavail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169
1170 /* Initialise write buffer */
1171 init_rwsem(&c->wbuf_sem);
Joern Engel28318772006-05-22 23:18:05 +02001172 c->wbuf_pagesize = c->mtd->writesize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 c->wbuf_ofs = 0xFFFFFFFF;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001174
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1176 if (!c->wbuf)
1177 return -ENOMEM;
1178
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001179 c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
1180 if (!c->oobbuf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 kfree(c->wbuf);
1182 return -ENOMEM;
1183 }
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001184
David Woodhousea6bc4322007-07-11 14:23:54 +01001185#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1186 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1187 if (!c->wbuf_verify) {
1188 kfree(c->oobbuf);
1189 kfree(c->wbuf);
1190 return -ENOMEM;
1191 }
1192#endif
Artem Bityutskiya7a6ace12007-01-31 11:38:53 +02001193 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194}
1195
1196void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1197{
David Woodhousea6bc4322007-07-11 14:23:54 +01001198#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1199 kfree(c->wbuf_verify);
1200#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 kfree(c->wbuf);
Thomas Gleixner8593fbc2006-05-29 03:26:58 +02001202 kfree(c->oobbuf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203}
1204
Andrew Victor8f15fd52005-02-09 09:17:45 +00001205int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1206 c->cleanmarker_size = 0; /* No cleanmarkers needed */
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001207
Andrew Victor8f15fd52005-02-09 09:17:45 +00001208 /* Initialize write buffer */
1209 init_rwsem(&c->wbuf_sem);
Andrew Victor8f15fd52005-02-09 09:17:45 +00001210
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001211
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001212 c->wbuf_pagesize = c->mtd->erasesize;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001213
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001214 /* Find a suitable c->sector_size
1215 * - Not too much sectors
1216 * - Sectors have to be at least 4 K + some bytes
1217 * - All known dataflashes have erase sizes of 528 or 1056
1218 * - we take at least 8 eraseblocks and want to have at least 8K size
1219 * - The concatenation should be a power of 2
1220 */
Andrew Victor8f15fd52005-02-09 09:17:45 +00001221
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001222 c->sector_size = 8 * c->mtd->erasesize;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001223
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001224 while (c->sector_size < 8192) {
1225 c->sector_size *= 2;
1226 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001227
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001228 /* It may be necessary to adjust the flash size */
1229 c->flash_size = c->mtd->size;
1230
1231 if ((c->flash_size % c->sector_size) != 0) {
1232 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
Joe Perches5a528952012-02-15 15:56:45 -08001233 pr_warn("flash size adjusted to %dKiB\n", c->flash_size);
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001234 };
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001235
Artem B. Bityutskiydaba5cc2005-09-30 14:59:17 +01001236 c->wbuf_ofs = 0xFFFFFFFF;
Andrew Victor8f15fd52005-02-09 09:17:45 +00001237 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1238 if (!c->wbuf)
1239 return -ENOMEM;
1240
michaelcca15842008-04-18 13:44:17 -07001241#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1242 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1243 if (!c->wbuf_verify) {
1244 kfree(c->oobbuf);
1245 kfree(c->wbuf);
1246 return -ENOMEM;
1247 }
1248#endif
1249
Joe Perches5a528952012-02-15 15:56:45 -08001250 pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
Joe Perchesda320f02012-02-15 15:56:44 -08001251 c->wbuf_pagesize, c->sector_size);
Andrew Victor8f15fd52005-02-09 09:17:45 +00001252
1253 return 0;
1254}
1255
1256void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
michaelcca15842008-04-18 13:44:17 -07001257#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1258 kfree(c->wbuf_verify);
1259#endif
Andrew Victor8f15fd52005-02-09 09:17:45 +00001260 kfree(c->wbuf);
1261}
Andrew Victor8f15fd52005-02-09 09:17:45 +00001262
Nicolas Pitre59da7212005-08-06 05:51:33 +01001263int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
Joern Engelc8b229d2006-05-22 23:18:12 +02001264 /* Cleanmarker currently occupies whole programming regions,
1265 * either one or 2 for 8Byte STMicro flashes. */
1266 c->cleanmarker_size = max(16u, c->mtd->writesize);
Nicolas Pitre59da7212005-08-06 05:51:33 +01001267
1268 /* Initialize write buffer */
1269 init_rwsem(&c->wbuf_sem);
Joern Engel28318772006-05-22 23:18:05 +02001270 c->wbuf_pagesize = c->mtd->writesize;
Nicolas Pitre59da7212005-08-06 05:51:33 +01001271 c->wbuf_ofs = 0xFFFFFFFF;
1272
1273 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1274 if (!c->wbuf)
1275 return -ENOMEM;
1276
Massimo Cirillobc8cec02009-08-27 10:44:09 +02001277#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1278 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1279 if (!c->wbuf_verify) {
1280 kfree(c->wbuf);
1281 return -ENOMEM;
1282 }
1283#endif
Nicolas Pitre59da7212005-08-06 05:51:33 +01001284 return 0;
1285}
1286
1287void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
Massimo Cirillobc8cec02009-08-27 10:44:09 +02001288#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1289 kfree(c->wbuf_verify);
1290#endif
Nicolas Pitre59da7212005-08-06 05:51:33 +01001291 kfree(c->wbuf);
1292}
Artem Bityutskiy0029da32006-10-04 19:15:21 +03001293
1294int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
1295 c->cleanmarker_size = 0;
1296
1297 if (c->mtd->writesize == 1)
1298 /* We do not need write-buffer */
1299 return 0;
1300
1301 init_rwsem(&c->wbuf_sem);
1302
1303 c->wbuf_pagesize = c->mtd->writesize;
1304 c->wbuf_ofs = 0xFFFFFFFF;
1305 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1306 if (!c->wbuf)
1307 return -ENOMEM;
1308
Joe Perches5a528952012-02-15 15:56:45 -08001309 pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
Joe Perchesda320f02012-02-15 15:56:44 -08001310 c->wbuf_pagesize, c->sector_size);
Artem Bityutskiy0029da32006-10-04 19:15:21 +03001311
1312 return 0;
1313}
1314
/* Release the write buffer allocated by jffs2_ubivol_setup(). */
void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}