/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/crc32.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include "nodelist.h"

/*
 * Check the data CRC of the node.
 *
 * Returns: 0 if the data CRC is correct;
 *	    1 if it is incorrect;
 *	    a negative error code if an error occurred.
 */
static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	struct jffs2_raw_node_ref *ref = tn->fn->raw;
	int err = 0, pointed = 0;
	struct jffs2_eraseblock *jeb;
	unsigned char *buffer;
	uint32_t crc, ofs, len;
	size_t retlen;

	BUG_ON(tn->csize == 0);

	/* Calculate how many bytes were already checked */
	ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
	len = tn->csize;

	if (jffs2_is_writebuffered(c)) {
		int adj = ofs % c->wbuf_pagesize;
		if (likely(adj))
			adj = c->wbuf_pagesize - adj;

		if (adj >= tn->csize) {
			dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
				      ref_offset(ref), tn->csize, ofs);
			goto adj_acc;
		}

		ofs += adj;
		len -= adj;
	}

	dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
		      ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);

#ifndef __ECOS
	/* TODO: instead, encapsulate the point() stuff in jffs2_flash_read(),
	 * adding a jffs2_flash_read_end() interface. */
	err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL);
	if (!err && retlen < len) {
		JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
		mtd_unpoint(c->mtd, ofs, retlen);
	} else if (err) {
		if (err != -EOPNOTSUPP)
			JFFS2_WARNING("MTD point failed: error code %d.\n", err);
	} else
		pointed = 1; /* successfully pointed to device */
#endif

	if (!pointed) {
		buffer = kmalloc(len, GFP_KERNEL);
		if (unlikely(!buffer))
			return -ENOMEM;

		/* TODO: this is a very frequent pattern; make it a
		 * separate routine */
		err = jffs2_flash_read(c, ofs, len, &retlen, buffer);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
			goto free_out;
		}

		if (retlen != len) {
			JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
			err = -EIO;
			goto free_out;
		}
	}

	/* Continue calculating CRC */
	crc = crc32(tn->partial_crc, buffer, len);
	if (!pointed)
		kfree(buffer);
#ifndef __ECOS
	else
		mtd_unpoint(c->mtd, ofs, len);
#endif

	if (crc != tn->data_crc) {
		JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
			     ref_offset(ref), tn->data_crc, crc);
		return 1;
	}

adj_acc:
	jeb = &c->blocks[ref->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, ref);
	/* If it should be REF_NORMAL, it'll get marked as such when
	   we build the fragtree, shortly. No need to worry about GC
	   moving it while it's marked REF_PRISTINE -- GC won't happen
	   till we've finished checking every inode anyway. */
	ref->flash_offset |= REF_PRISTINE;
	/*
	 * Mark the node as having been checked and fix the
	 * accounting accordingly.
	 */
	spin_lock(&c->erase_completion_lock);
	jeb->used_size += len;
	jeb->unchecked_size -= len;
	c->used_size += len;
	c->unchecked_size -= len;
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
	spin_unlock(&c->erase_completion_lock);

	return 0;

free_out:
	if (!pointed)
		kfree(buffer);
#ifndef __ECOS
	else
		mtd_unpoint(c->mtd, ofs, len);
#endif
	return err;
}

/*
 * Helper function for jffs2_add_tn_to_tree() and jffs2_build_inode_fragtree().
 *
 * Checks the node's data CRC if we are still in the checking stage.
 */
static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	int ret;

	BUG_ON(ref_obsolete(tn->fn->raw));

	/* We only check the data CRC of unchecked nodes */
	if (ref_flags(tn->fn->raw) != REF_UNCHECKED)
		return 0;

	dbg_readinode("check node %#04x-%#04x, phys offs %#08x\n",
		      tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw));

	ret = check_node_data(c, tn);
	if (unlikely(ret < 0)) {
		JFFS2_ERROR("check_node_data() returned error: %d.\n",
			    ret);
	} else if (unlikely(ret > 0)) {
		dbg_readinode("CRC error, mark it obsolete.\n");
		jffs2_mark_node_obsolete(c, tn->fn->raw);
	}

	return ret;
}

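/*
 * Walk the tmpnode tree towards 'offset': go right while the current
 * node starts below it, left otherwise. The walk runs off the bottom
 * of the tree and returns the last node visited, which the caller
 * treats as the earliest node that _may_ be relevant and scans
 * linearly from there.
 */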
static struct jffs2_tmp_dnode_info *jffs2_lookup_tn(struct rb_root *tn_root, uint32_t offset)
{
	struct rb_node *next;
	struct jffs2_tmp_dnode_info *tn = NULL;

	dbg_readinode("root %p, offset %d\n", tn_root, offset);

	next = tn_root->rb_node;

	while (next) {
		tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb);

		if (tn->fn->ofs < offset)
			next = tn->rb.rb_right;
		else if (tn->fn->ofs >= offset)
			next = tn->rb.rb_left;
		else
			break;
	}

	return tn;
}


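/* Mark the flash node behind 'tn' obsolete and free the in-core structures. */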
static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	jffs2_mark_node_obsolete(c, tn->fn->raw);
	jffs2_free_full_dnode(tn->fn);
	jffs2_free_tmp_dnode_info(tn);
}
/*
 * This function is used when we read an inode. Data nodes arrive in
 * arbitrary order -- they may be older or newer than the nodes which
 * are already in the tree. Where overlaps occur, the older node can
 * be discarded as long as the newer passes the CRC check. We don't
 * bother to keep track of holes in this rbtree, and neither do we deal
 * with frags -- we can have multiple entries starting at the same
 * offset, and the one with the smallest length will come first in the
 * ordering.
 *
 * Returns 0 if the node was handled (including marking it obsolete);
 *	   < 0 if an error occurred.
 */
static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
				struct jffs2_readinode_info *rii,
				struct jffs2_tmp_dnode_info *tn)
{
	uint32_t fn_end = tn->fn->ofs + tn->fn->size;
	struct jffs2_tmp_dnode_info *this, *ptn;

	dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw));

	/* If a node has zero dsize, we only have to keep it if it might be the
	   node with highest version -- i.e. the one which will end up as f->metadata.
	   Note that such nodes won't be REF_UNCHECKED since there is no data to
	   check anyway. */
	if (!tn->fn->size) {
		if (rii->mdata_tn) {
			if (rii->mdata_tn->version < tn->version) {
				/* We had a candidate mdata node already */
				dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version);
				jffs2_kill_tn(c, rii->mdata_tn);
			} else {
				dbg_readinode("kill new mdata with ver %d (older than existing %d)\n",
					      tn->version, rii->mdata_tn->version);
				jffs2_kill_tn(c, tn);
				return 0;
			}
		}
		rii->mdata_tn = tn;
		dbg_readinode("keep new mdata with ver %d\n", tn->version);
		return 0;
	}

	/* Find the earliest node which _may_ be relevant to this one */
	this = jffs2_lookup_tn(&rii->tn_root, tn->fn->ofs);
	if (this) {
		/* If the node is coincident with another at a lower address,
		   back up until the other node is found. It may be relevant */
		while (this->overlapped) {
			ptn = tn_prev(this);
			if (!ptn) {
				/*
				 * We killed a node which set the overlapped
				 * flags during the scan. Fix it up.
				 */
				this->overlapped = 0;
				break;
			}
			this = ptn;
		}
		dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");
	}

	while (this) {
		if (this->fn->ofs > fn_end)
			break;
		dbg_readinode("Ponder this ver %d, 0x%x-0x%x\n",
			      this->version, this->fn->ofs, this->fn->size);

		if (this->version == tn->version) {
			/* Version number collision means REF_PRISTINE GC. Accept either of them
			   as long as the CRC is correct. Check the one we have already... */
			if (!check_tn_node(c, this)) {
				/* The one we already had was OK. Keep it and throw away the new one */
				dbg_readinode("Like old node. Throw away new\n");
				jffs2_kill_tn(c, tn);
				return 0;
			} else {
				/* Who cares if the new one is good; keep it for now anyway. */
				dbg_readinode("Like new node. Throw away old\n");
				rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
				jffs2_kill_tn(c, this);
				/* Same extent as the old node, so the overlaps in front and behind are unchanged */
				return 0;
			}
		}
		if (this->version < tn->version &&
		    this->fn->ofs >= tn->fn->ofs &&
		    this->fn->ofs + this->fn->size <= fn_end) {
			/* New node entirely overlaps 'this' */
			if (check_tn_node(c, tn)) {
				dbg_readinode("new node bad CRC\n");
				jffs2_kill_tn(c, tn);
				return 0;
			}
			/* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */
			while (this && this->fn->ofs + this->fn->size <= fn_end) {
				struct jffs2_tmp_dnode_info *next = tn_next(this);
				if (this->version < tn->version) {
					tn_erase(this, &rii->tn_root);
					dbg_readinode("Kill overlapped ver %d, 0x%x-0x%x\n",
						      this->version, this->fn->ofs,
						      this->fn->ofs+this->fn->size);
					jffs2_kill_tn(c, this);
				}
				this = next;
			}
			dbg_readinode("Done killing overlapped nodes\n");
			continue;
		}
		if (this->version > tn->version &&
		    this->fn->ofs <= tn->fn->ofs &&
		    this->fn->ofs+this->fn->size >= fn_end) {
			/* New node entirely overlapped by 'this' */
			if (!check_tn_node(c, this)) {
				dbg_readinode("Good CRC on old node. Kill new\n");
				jffs2_kill_tn(c, tn);
				return 0;
			}
			/* ... but 'this' was bad. Replace it... */
			dbg_readinode("Bad CRC on old overlapping node. Kill it\n");
			tn_erase(this, &rii->tn_root);
			jffs2_kill_tn(c, this);
			break;
		}

		this = tn_next(this);
	}

	/* We neither completely obsoleted nor were completely
	   obsoleted by an earlier node. Insert into the tree */
	{
		struct rb_node *parent;
		struct rb_node **link = &rii->tn_root.rb_node;
		struct jffs2_tmp_dnode_info *insert_point = NULL;

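		/* Keyed by offset first and then by size, so of two nodes
		   starting at the same offset the shorter one sorts earlier,
		   as promised by the comment above jffs2_add_tn_to_tree(). */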
		while (*link) {
			parent = *link;
			insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
			if (tn->fn->ofs > insert_point->fn->ofs)
				link = &insert_point->rb.rb_right;
			else if (tn->fn->ofs < insert_point->fn->ofs ||
				 tn->fn->size < insert_point->fn->size)
				link = &insert_point->rb.rb_left;
			else
				link = &insert_point->rb.rb_right;
		}
		rb_link_node(&tn->rb, &insert_point->rb, link);
		rb_insert_color(&tn->rb, &rii->tn_root);
	}

	/* If there's anything behind that overlaps us, note it */
	this = tn_prev(tn);
	if (this) {
		while (1) {
			if (this->fn->ofs + this->fn->size > tn->fn->ofs) {
				dbg_readinode("Node is overlapped by %p (v %d, 0x%x-0x%x)\n",
					      this, this->version, this->fn->ofs,
					      this->fn->ofs+this->fn->size);
				tn->overlapped = 1;
				break;
			}
			if (!this->overlapped)
				break;

			ptn = tn_prev(this);
			if (!ptn) {
				/*
				 * We killed a node which set the overlapped
				 * flags during the scan. Fix it up.
				 */
				this->overlapped = 0;
				break;
			}
			this = ptn;
		}
	}

	/* If the new node overlaps anything ahead, note it */
	this = tn_next(tn);
	while (this && this->fn->ofs < fn_end) {
		this->overlapped = 1;
		dbg_readinode("Node ver %d, 0x%x-0x%x is overlapped\n",
			      this->version, this->fn->ofs,
			      this->fn->ofs+this->fn->size);
		this = tn_next(this);
	}
	return 0;
}

/* Trivial function to remove the last node in the tree. Which by definition
   has no right-hand child -- so can be removed just by making its left-hand
   child (if any) take its place under its parent. Since this is only done
   when we're consuming the whole tree, there's no need to use rb_erase()
   and let it worry about adjusting colours and balancing the tree. That
   would just be a waste of time. */
static void eat_last(struct rb_root *root, struct rb_node *node)
{
	struct rb_node *parent = rb_parent(node);
	struct rb_node **link;

	/* LAST! */
	BUG_ON(node->rb_right);

	if (!parent)
		link = &root->rb_node;
	else if (node == parent->rb_left)
		link = &parent->rb_left;
	else
		link = &parent->rb_right;

	*link = node->rb_left;
	if (node->rb_left)
		node->rb_left->__rb_parent_color = node->__rb_parent_color;
}

/* We put the version tree in reverse order, so we can use the same eat_last()
   function that we use to consume the tmpnode tree (tn_root). */
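/* Higher versions sort to the left, so repeatedly removing the last
   (rightmost) node with eat_last() yields increasing version order. */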
static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn)
{
	struct rb_node **link = &ver_root->rb_node;
	struct rb_node *parent = NULL;
	struct jffs2_tmp_dnode_info *this_tn;

	while (*link) {
		parent = *link;
		this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);

		if (tn->version > this_tn->version)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	dbg_readinode("Link new node at %p (root is %p)\n", link, ver_root);
	rb_link_node(&tn->rb, parent, link);
	rb_insert_color(&tn->rb, ver_root);
}

/* Build final, normal fragtree from tn tree. It doesn't matter which order
   we add nodes to the real fragtree, as long as they don't overlap. And
   having thrown away the majority of overlapped nodes as we went, there
   really shouldn't be many sets of nodes which do overlap. If we start at
   the end, we can use the overlap markers -- we can just eat nodes which
   aren't overlapped, and when we encounter nodes which _do_ overlap we
   sort them all into a temporary tree in version order before replaying them. */
static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
				      struct jffs2_inode_info *f,
				      struct jffs2_readinode_info *rii)
{
	struct jffs2_tmp_dnode_info *pen, *last, *this;
	struct rb_root ver_root = RB_ROOT;
	uint32_t high_ver = 0;

	if (rii->mdata_tn) {
		dbg_readinode("potential mdata is ver %d at %p\n", rii->mdata_tn->version, rii->mdata_tn);
		high_ver = rii->mdata_tn->version;
		rii->latest_ref = rii->mdata_tn->fn->raw;
	}
#ifdef JFFS2_DBG_READINODE_MESSAGES
	this = tn_last(&rii->tn_root);
	while (this) {
		dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs,
			      this->fn->ofs+this->fn->size, this->overlapped);
		this = tn_prev(this);
	}
#endif
	pen = tn_last(&rii->tn_root);
	while ((last = pen)) {
		pen = tn_prev(last);

		eat_last(&rii->tn_root, &last->rb);
		ver_insert(&ver_root, last);

		if (unlikely(last->overlapped)) {
			if (pen)
				continue;
			/*
			 * We killed a node which set the overlapped
			 * flags during the scan. Fix it up.
			 */
			last->overlapped = 0;
		}

		/* Now we have a bunch of nodes in reverse version
		   order, in the tree at ver_root. Most of the time,
		   there'll actually be only one node in the 'tree',
		   in fact. */
		this = tn_last(&ver_root);

		while (this) {
			struct jffs2_tmp_dnode_info *vers_next;
			int ret;
			vers_next = tn_prev(this);
			eat_last(&ver_root, &this->rb);
			if (check_tn_node(c, this)) {
				dbg_readinode("node ver %d, 0x%x-0x%x failed CRC\n",
					      this->version, this->fn->ofs,
					      this->fn->ofs+this->fn->size);
				jffs2_kill_tn(c, this);
			} else {
				if (this->version > high_ver) {
					/* Note that this is different from the other
					   highest_version, because this one is only
					   counting _valid_ nodes which could give the
					   latest inode metadata */
					high_ver = this->version;
					rii->latest_ref = this->fn->raw;
				}
				dbg_readinode("Add %p (v %d, 0x%x-0x%x, ov %d) to fragtree\n",
					      this, this->version, this->fn->ofs,
					      this->fn->ofs+this->fn->size, this->overlapped);

				ret = jffs2_add_full_dnode_to_inode(c, f, this->fn);
				if (ret) {
					/* Free the nodes in vers_root; let the caller
					   deal with the rest */
					JFFS2_ERROR("Add node to tree failed %d\n", ret);
					while (1) {
						vers_next = tn_prev(this);
						if (check_tn_node(c, this))
							jffs2_mark_node_obsolete(c, this->fn->raw);
						jffs2_free_full_dnode(this->fn);
						jffs2_free_tmp_dnode_info(this);
						this = vers_next;
						if (!this)
							break;
						eat_last(&ver_root, &vers_next->rb);
					}
					return ret;
				}
				jffs2_free_tmp_dnode_info(this);
			}
			this = vers_next;
		}
	}
	return 0;
}

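/* Free every tmpnode with an iterative post-order walk: descend to a
   leaf, free it, then step back to the parent and clear its pointer to
   the freed child so that the walk never revisits it. */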
static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
	struct rb_node *this;
	struct jffs2_tmp_dnode_info *tn;

	this = list->rb_node;

	/* Now at bottom of tree */
	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
			jffs2_free_full_dnode(tn->fn);
			jffs2_free_tmp_dnode_info(tn);

			this = rb_parent(this);
			if (!this)
				break;

			if (this->rb_left == &tn->rb)
				this->rb_left = NULL;
			else if (this->rb_right == &tn->rb)
				this->rb_right = NULL;
			else BUG();
		}
	}
	*list = RB_ROOT;
}

static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent *next;

	while (fd) {
		next = fd->next;
		jffs2_free_full_dirent(fd);
		fd = next;
	}
}

/* Returns first valid node after 'ref'. May return 'ref' */
static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
	while (ref && ref->next_in_ino) {
		if (!ref_obsolete(ref))
			return ref;
		dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
		ref = ref->next_in_ino;
	}
	return NULL;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
 *
 * Returns: 0 on success;
 *	    negative error code on failure.
 */
static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
				struct jffs2_raw_dirent *rd, size_t read,
				struct jffs2_readinode_info *rii)
{
	struct jffs2_full_dirent *fd;
	uint32_t crc;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

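	/* The node CRC covers the header up to, but not including, the two
	   trailing CRC fields themselves -- hence the sizeof(*rd) - 8. */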
	crc = crc32(0, rd, sizeof(*rd) - 8);
	if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
		JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
			     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {
		struct jffs2_eraseblock *jeb;
		int len;

		/* Sanity check */
		if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
				    ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
			jffs2_mark_node_obsolete(c, ref);
			return 0;
		}

		jeb = &c->blocks[ref->flash_offset / c->sector_size];
		len = ref_totlen(c, jeb, ref);

		spin_lock(&c->erase_completion_lock);
		jeb->used_size += len;
		jeb->unchecked_size -= len;
		c->used_size += len;
		c->unchecked_size -= len;
		ref->flash_offset = ref_offset(ref) | dirent_node_state(rd);
		spin_unlock(&c->erase_completion_lock);
	}

	fd = jffs2_alloc_full_dirent(rd->nsize + 1);
	if (unlikely(!fd))
		return -ENOMEM;

	fd->raw = ref;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->type = rd->type;

	if (fd->version > rii->highest_version)
		rii->highest_version = fd->version;

	/* Pick out the mctime of the latest dirent */
	if (fd->version > rii->mctime_ver && je32_to_cpu(rd->mctime)) {
		rii->mctime_ver = fd->version;
		rii->latest_mctime = je32_to_cpu(rd->mctime);
	}

	/*
	 * Copy as much of the name as possible from the raw
	 * dirent we've already read from the flash.
	 */
	if (read > sizeof(*rd))
		memcpy(&fd->name[0], &rd->name[0],
		       min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));

	/* Do we need to copy any more of the name directly from the flash? */
	if (rd->nsize + sizeof(*rd) > read) {
		/* FIXME: point() */
		int err;
		int already = read - sizeof(*rd);

		err = jffs2_flash_read(c, (ref_offset(ref)) + read,
				rd->nsize - already, &read, &fd->name[already]);
		if (unlikely(read != rd->nsize - already) && likely(!err)) {
			/* Short read with no error reported: free the
			   dirent we allocated above instead of leaking it. */
			jffs2_free_full_dirent(fd);
			return -EIO;
		}

		if (unlikely(err)) {
			JFFS2_ERROR("read remainder of name: error %d\n", err);
			jffs2_free_full_dirent(fd);
			return -EIO;
		}
	}

	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->next = NULL;
	fd->name[rd->nsize] = '\0';

	/*
	 * Wheee. We now have a complete jffs2_full_dirent structure, with
	 * the name in it and everything. Link it into the list
	 */
	jffs2_add_fd_to_list(c, fd, &rii->fds);

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode node is found.
 *
 * Returns: 0 on success (possibly after marking a bad node obsolete);
 *	    negative error code on failure.
 */
static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
			     struct jffs2_raw_inode *rd, int rdlen,
			     struct jffs2_readinode_info *rii)
{
	struct jffs2_tmp_dnode_info *tn;
	uint32_t len, csize;
	int ret = 0;
	uint32_t crc;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	crc = crc32(0, rd, sizeof(*rd) - 8);
	if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
		JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
			     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	tn = jffs2_alloc_tmp_dnode_info();
	if (!tn) {
		JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
		return -ENOMEM;
	}

	tn->partial_crc = 0;
	csize = je32_to_cpu(rd->csize);

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {

		/* Sanity checks */
		if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
		    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto free_out;
		}

		if (jffs2_is_writebuffered(c) && csize != 0) {
			/* At this point we are supposed to check the data CRC
			 * of our unchecked node. But thus far, we do not
			 * know whether the node is valid or obsolete. To
			 * figure this out, we need to walk all the nodes of
			 * the inode and build the inode fragtree. We don't
			 * want to spend time checking data of nodes which may
			 * later be found to be obsolete. So we put off the full
			 * data CRC checking until we have read all the inode
			 * nodes and have started building the fragtree.
			 *
			 * The fragtree is being built starting with nodes
			 * having the highest version number, so we'll be able
			 * to detect whether a node is valid (i.e., it is not
			 * overlapped by a node with higher version) or not.
			 * And we'll be able to check only those nodes, which
			 * are not obsolete.
			 *
			 * Of course, this optimization only makes sense in case
			 * of NAND flashes (or other flashes with
			 * !jffs2_can_mark_obsolete()), since on NOR flashes
			 * nodes are marked obsolete physically.
			 *
			 * Since NAND flashes (or other flashes with
			 * jffs2_is_writebuffered(c)) are anyway read by
			 * fractions of c->wbuf_pagesize, and we have just read
			 * the node header, it is likely that the starting part
			 * of the node data was also read when we read the
			 * header. So we don't mind checking the CRC of the
			 * starting part of the node's data now, and checking
			 * the rest later (in jffs2_check_node_data()).
			 * Of course, we will not need to re-read and re-check
			 * the NAND page which we have just read. This is why we
			 * read the whole NAND page in jffs2_get_inode_nodes(),
			 * even though we needed only the node header.
			 */
			unsigned char *buf;

			/* 'buf' will point to the start of data */
			buf = (unsigned char *)rd + sizeof(*rd);
			/* len will be the read data length */
			len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
			tn->partial_crc = crc32(0, buf, len);

			dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);

			/* If we actually calculated the whole data CRC
			 * and it is wrong, drop the node. */
			if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
				JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
					     ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
				jffs2_mark_node_obsolete(c, ref);
				goto free_out;
			}

		} else if (csize == 0) {
			/*
			 * We checked the header CRC. If the node has no data, adjust
			 * the space accounting now. For other nodes this will be done
			 * later either when the node is marked obsolete or when its
			 * data is checked.
			 */
			struct jffs2_eraseblock *jeb;

			dbg_readinode("the node has no data.\n");
			jeb = &c->blocks[ref->flash_offset / c->sector_size];
			len = ref_totlen(c, jeb, ref);

			spin_lock(&c->erase_completion_lock);
			jeb->used_size += len;
			jeb->unchecked_size -= len;
			c->used_size += len;
			c->unchecked_size -= len;
			ref->flash_offset = ref_offset(ref) | REF_NORMAL;
			spin_unlock(&c->erase_completion_lock);
		}
	}

	tn->fn = jffs2_alloc_full_dnode();
	if (!tn->fn) {
		JFFS2_ERROR("alloc fn failed\n");
		ret = -ENOMEM;
		goto free_out;
	}

	tn->version = je32_to_cpu(rd->version);
	tn->fn->ofs = je32_to_cpu(rd->offset);
	tn->data_crc = je32_to_cpu(rd->data_crc);
	tn->csize = csize;
	tn->fn->raw = ref;
	tn->overlapped = 0;

	if (tn->version > rii->highest_version)
		rii->highest_version = tn->version;

	/* There was a bug where we wrote hole nodes out with
	   csize/dsize swapped. Deal with it */
	if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
		tn->fn->size = csize;
	else // normal case...
		tn->fn->size = je32_to_cpu(rd->dsize);

	dbg_readinode2("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
		       ref_offset(ref), je32_to_cpu(rd->version),
		       je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);

	ret = jffs2_add_tn_to_tree(c, rii, tn);

	if (ret) {
		jffs2_free_full_dnode(tn->fn);
	free_out:
		jffs2_free_tmp_dnode_info(tn);
		return ret;
	}
#ifdef JFFS2_DBG_READINODE2_MESSAGES
	dbg_readinode2("After adding ver %d:\n", je32_to_cpu(rd->version));
	tn = tn_first(&rii->tn_root);
	while (tn) {
		dbg_readinode2("%p: v %d r 0x%x-0x%x ov %d\n",
			       tn, tn->version, tn->fn->ofs,
			       tn->fn->ofs+tn->fn->size, tn->overlapped);
		tn = tn_next(tn);
	}
#endif
	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an unknown node is found.
 *
 * Returns: 0 on success;
 *	    negative error code on failure.
 */
static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
{
	/* We don't mark unknown nodes as REF_UNCHECKED */
	if (ref_flags(ref) == REF_UNCHECKED) {
		JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n",
			    ref_offset(ref));
		JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n",
			    je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
			    je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc));
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));

	switch (je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {

	case JFFS2_FEATURE_INCOMPAT:
		JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
			    je16_to_cpu(un->nodetype), ref_offset(ref));
		/* EEP */
		BUG();
		break;

	case JFFS2_FEATURE_ROCOMPAT:
		JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
			    je16_to_cpu(un->nodetype), ref_offset(ref));
		BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
		break;

	case JFFS2_FEATURE_RWCOMPAT_COPY:
		JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
			     je16_to_cpu(un->nodetype), ref_offset(ref));
		break;

	case JFFS2_FEATURE_RWCOMPAT_DELETE:
		JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
			     je16_to_cpu(un->nodetype), ref_offset(ref));
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It checks whether more data should be read and, if so, reads it.
 *
 * Returns: 0 on success;
 *	    negative error code on failure.
 */
static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
		     int needed_len, int *rdlen, unsigned char *buf)
{
	int err, to_read = needed_len - *rdlen;
	size_t retlen;
	uint32_t offs;

	if (jffs2_is_writebuffered(c)) {
		int rem = to_read % c->wbuf_pagesize;

		if (rem)
			to_read += c->wbuf_pagesize - rem;
	}

	/* We need to read more data */
	offs = ref_offset(ref) + *rdlen;

	dbg_readinode("read more %d bytes\n", to_read);

	err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen);
	if (err) {
		JFFS2_ERROR("can not read %d bytes from 0x%08x, "
			    "error code: %d.\n", to_read, offs, err);
		return err;
	}

	if (retlen < to_read) {
		JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
			    offs, retlen, to_read);
		return -EIO;
	}

	*rdlen += to_read;
	return 0;
}

/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino. Perform a preliminary ordering on data nodes, throwing away
   those which are completely obsoleted by newer ones. The naïve approach we
   used to take, of just returning them _all_ in version order, will cause us
   to run out of memory in certain degenerate cases. */
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct jffs2_readinode_info *rii)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	unsigned char *buf = NULL;
	union jffs2_node_union *node;
	size_t retlen;
	int len, err;

	rii->mctime_ver = 0;

	dbg_readinode("ino #%u\n", f->inocache->ino);

	/* FIXME: in case of NOR and available ->point() this
	 * needs to be fixed. */
	len = sizeof(union jffs2_node_union) + c->wbuf_pagesize;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(&c->erase_completion_lock);
	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
	if (!valid_ref && f->inocache->ino != 1)
		JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
	while (valid_ref) {
		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/*
		 * At this point we don't know the type of the node we're going
		 * to read, so we do not know the size of its header. In order
		 * to minimize the amount of flash IO we assume the header is
		 * of size = JFFS2_MIN_NODE_HEADER.
		 */
		len = JFFS2_MIN_NODE_HEADER;
		if (jffs2_is_writebuffered(c)) {
			int end, rem;

			/*
			 * We are about to read JFFS2_MIN_NODE_HEADER bytes,
			 * but this flash has some minimal I/O unit. It is
			 * possible that we'll need to read more soon, so read
			 * up to the next min. I/O unit, in order not to
			 * re-read the same min. I/O unit twice.
			 */
			end = ref_offset(ref) + len;
			rem = end % c->wbuf_pagesize;
			if (rem)
				end += c->wbuf_pagesize - rem;
			len = end - ref_offset(ref);
		}

		dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)buf;

		/* No need to mask in the valid bit; it shouldn't be invalid */
		if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
			JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
				     ref_offset(ref), je16_to_cpu(node->u.magic),
				     je16_to_cpu(node->u.nodetype),
				     je32_to_cpu(node->u.totlen),
				     je32_to_cpu(node->u.hdr_crc));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}
		if (je16_to_cpu(node->u.magic) != JFFS2_MAGIC_BITMASK) {
			/* Not a JFFS2 node, whinge and move on */
			JFFS2_NOTICE("Wrong magic bitmask 0x%04x in node header at %#08x.\n",
				     je16_to_cpu(node->u.magic), ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}

		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent) &&
			    len < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_direntry(c, ref, &node->d, retlen, rii);
			if (unlikely(err))
				goto free_out;

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode) &&
			    len < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, len, rii);
			if (unlikely(err))
				goto free_out;

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node) &&
			    len < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (unlikely(err))
				goto free_out;

		}
	cont:
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	kfree(buf);

	f->highest_version = rii->highest_version;

	dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
		      f->inocache->ino, rii->highest_version, rii->latest_mctime,
		      rii->mctime_ver);
	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(&rii->tn_root);
	jffs2_free_full_dirent_list(rii->fds);
	rii->fds = NULL;
	kfree(buf);
	return err;
}

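/*
 * Read all the nodes belonging to the inode, build its fragtree, then
 * read the latest inode node itself into 'latest_node'. On the fatal
 * error paths below, f->sem is released and the inode is cleared.
 */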
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001145static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146 struct jffs2_inode_info *f,
1147 struct jffs2_raw_inode *latest_node)
1148{
David Woodhousedf8e96f2007-04-25 03:23:42 +01001149 struct jffs2_readinode_info rii;
David Woodhouse61c4b232007-04-25 17:04:23 +01001150 uint32_t crc, new_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151 size_t retlen;
1152 int ret;
1153
David Woodhouse27c72b02008-05-01 18:47:17 +01001154 dbg_readinode("ino #%u pino/nlink is %d\n", f->inocache->ino,
1155 f->inocache->pino_nlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156
David Woodhousedf8e96f2007-04-25 03:23:42 +01001157 memset(&rii, 0, sizeof(rii));
1158
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159 /* Grab all nodes relevant to this ino */
David Woodhousedf8e96f2007-04-25 03:23:42 +01001160 ret = jffs2_get_inode_nodes(c, f, &rii);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161
1162 if (ret) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001163 JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 if (f->inocache->state == INO_STATE_READING)
1165 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1166 return ret;
1167 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168
David Woodhousedf8e96f2007-04-25 03:23:42 +01001169 ret = jffs2_build_inode_fragtree(c, f, &rii);
1170 if (ret) {
1171 JFFS2_ERROR("Failed to build final fragtree for inode #%u: error %d\n",
1172 f->inocache->ino, ret);
1173 if (f->inocache->state == INO_STATE_READING)
1174 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1175 jffs2_free_tmp_dnode_info_list(&rii.tn_root);
1176 /* FIXME: We could at least crc-check them all */
1177 if (rii.mdata_tn) {
1178 jffs2_free_full_dnode(rii.mdata_tn->fn);
1179 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1180 rii.mdata_tn = NULL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001181 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001182 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001184
	if (rii.mdata_tn) {
		if (rii.mdata_tn->fn->raw == rii.latest_ref) {
			f->metadata = rii.mdata_tn->fn;
			jffs2_free_tmp_dnode_info(rii.mdata_tn);
		} else {
			jffs2_kill_tn(c, rii.mdata_tn);
		}
		rii.mdata_tn = NULL;
	}

	f->dents = rii.fds;

	jffs2_dbg_fragtree_paranoia_check_nolock(f);

	if (unlikely(!rii.latest_ref)) {
		/* No data nodes for this inode. */
		if (f->inocache->ino != 1) {
			JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
			if (!rii.fds) {
				if (f->inocache->state == INO_STATE_READING)
					jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
				return -EIO;
			}
			JFFS2_NOTICE("but it has children so we fake some modes for it\n");
		}
		latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
		latest_node->version = cpu_to_je32(0);
		latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
		latest_node->isize = cpu_to_je32(0);
		latest_node->gid = cpu_to_je16(0);
		latest_node->uid = cpu_to_je16(0);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
		return 0;
	}

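	/* Read the latest raw inode node back from the flash and verify it.
	   Its node_crc covers the whole header except the last 8 bytes
	   (the data_crc and node_crc fields themselves), hence the
	   sizeof(*latest_node)-8 in the crc32 below. */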
	ret = jffs2_flash_read(c, ref_offset(rii.latest_ref), sizeof(*latest_node), &retlen, (void *)latest_node);
	if (ret || retlen != sizeof(*latest_node)) {
		JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
			    ret, retlen, sizeof(*latest_node));
		/* FIXME: If this fails, there seems to be a memory leak. Find it. */
		mutex_unlock(&f->sem);
		jffs2_do_clear_inode(c, f);
		return ret ? ret : -EIO;
	}

	crc = crc32(0, latest_node, sizeof(*latest_node)-8);
	if (crc != je32_to_cpu(latest_node->node_crc)) {
		JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
			    f->inocache->ino, ref_offset(rii.latest_ref));
		mutex_unlock(&f->sem);
		jffs2_do_clear_inode(c, f);
		return -EIO;
	}

	switch (jemode_to_cpu(latest_node->mode) & S_IFMT) {
	case S_IFDIR:
		if (rii.mctime_ver > je32_to_cpu(latest_node->version)) {
			/* The times in the latest_node are actually older than
			   mctime in the latest dirent. Cheat. */
			latest_node->ctime = latest_node->mtime = cpu_to_je32(rii.latest_mctime);
		}
		break;

	case S_IFREG:
		/* If it was a regular file, truncate it to the latest node's isize */
		new_size = jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
		if (new_size != je32_to_cpu(latest_node->isize)) {
			JFFS2_WARNING("Truncating ino #%u to %u bytes failed because it only had %u bytes to start with!\n",
				      f->inocache->ino, je32_to_cpu(latest_node->isize), new_size);
			latest_node->isize = cpu_to_je32(new_size);
		}
		break;

	case S_IFLNK:
		/* Hack to work around broken isize in old symlink code.
		   Remove this when dwmw2 comes to his senses and stops
		   symlinks from being an entirely gratuitous special
		   case. */
		if (!je32_to_cpu(latest_node->isize))
			latest_node->isize = latest_node->dsize;

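		/* If we are merely CRC-checking the inode (INO_STATE_CHECKING,
		   see jffs2_do_crccheck_inode() below), the in-core inode is
		   about to be thrown away again, so don't bother caching the
		   target. */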
		if (f->inocache->state != INO_STATE_CHECKING) {
			/* A symlink's inode data is its target path. Read it
			 * and keep it in RAM so that following the link is
			 * quick. */
			uint32_t csize = je32_to_cpu(latest_node->csize);
			if (csize > JFFS2_MAX_NAME_LEN) {
				mutex_unlock(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ENAMETOOLONG;
			}
			f->target = kmalloc(csize + 1, GFP_KERNEL);
			if (!f->target) {
				JFFS2_ERROR("can't allocate %u bytes of memory for the symlink target path cache\n", csize);
				mutex_unlock(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ENOMEM;
			}

			ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node),
					       csize, &retlen, (char *)f->target);

			if (ret || retlen != csize) {
				if (retlen != csize)
					ret = -EIO;
				kfree(f->target);
				f->target = NULL;
				mutex_unlock(&f->sem);
				jffs2_do_clear_inode(c, f);
				return ret;
			}

			f->target[csize] = '\0';
			dbg_readinode("symlink's target '%s' cached\n", f->target);
		}

		/* fall through... */

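	/* For block and char devices, the payload of that single node is,
	   as far as I can tell, the encoded device number (an assumption
	   about code outside this file; see jffs2_encode_dev()). */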
	case S_IFBLK:
	case S_IFCHR:
		/* Certain inode types should have only one data node, and it's
		   kept as the metadata node */
		if (f->metadata) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			mutex_unlock(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		if (!frag_first(&f->fragtree)) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			mutex_unlock(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* ASSERT: frag_first(&f->fragtree) != NULL */
		if (frag_next(frag_first(&f->fragtree))) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had more than one node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
			mutex_unlock(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* OK. We're happy */
		f->metadata = frag_first(&f->fragtree)->node;
		jffs2_free_node_frag(frag_first(&f->fragtree));
		f->fragtree = RB_ROOT;
		break;
	}
	if (f->inocache->state == INO_STATE_READING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);

	return 0;
}

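/*
 * Typical use, roughly as in the VFS-facing jffs2_iget() in fs.c (a
 * sketch, not the verbatim caller): allocate a jffs2_inode_info, take
 * f->sem, then
 *
 *	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
 *
 * On success, latest_node holds the most recent on-flash inode metadata
 * and f->sem is still held by the caller.
 */
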
/* Scan the list of all nodes present for this ino, build map of versions, etc. */
int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			uint32_t ino, struct jffs2_raw_inode *latest_node)
{
	dbg_readinode("read inode #%u\n", ino);

 retry_inocache:
	spin_lock(&c->inocache_lock);
	f->inocache = jffs2_get_ino_cache(c, ino);

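	/* The inocache state serialises access to the inode: if another
	   thread is CRC-checking or garbage-collecting it, we sleep on
	   c->inocache_wq and retry once the owner has put it back. */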
	if (f->inocache) {
		/* Check its state. We may need to wait before we can use it */
		switch (f->inocache->state) {
		case INO_STATE_UNCHECKED:
		case INO_STATE_CHECKEDABSENT:
			f->inocache->state = INO_STATE_READING;
			break;

		case INO_STATE_CHECKING:
		case INO_STATE_GC:
			/* If it's in either of these states, we need
			   to wait for whoever's got it to finish and
			   put it back. */
			dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			goto retry_inocache;

		case INO_STATE_READING:
		case INO_STATE_PRESENT:
			/* Eep. This should never happen - but it can, if
			   Linux calls read_inode() again before clear_inode()
			   has finished. */
			JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
			/* Fail. That's probably better than allowing it to succeed */
			f->inocache = NULL;
			break;

		default:
			BUG();
		}
	}
	spin_unlock(&c->inocache_lock);

	if (!f->inocache && ino == 1) {
		/* Special case - no root inode on medium */
		f->inocache = jffs2_alloc_inode_cache();
		if (!f->inocache) {
			JFFS2_ERROR("cannot allocate inocache for root inode\n");
			return -ENOMEM;
		}
		dbg_readinode("creating inocache for root inode\n");
		memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
		f->inocache->ino = f->inocache->pino_nlink = 1;
		f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
		f->inocache->state = INO_STATE_READING;
		jffs2_add_ino_cache(c, f->inocache);
	}
	if (!f->inocache) {
		JFFS2_ERROR("requested to read a nonexistent ino %u\n", ino);
		return -ENOENT;
	}

	return jffs2_do_read_inode_internal(c, f, latest_node);
}

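/* CRC-check all the nodes of an inode without instantiating it: build
   the inode just as jffs2_do_read_inode_internal() would - verifying
   node CRCs along the way - then throw the result straight away. */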
int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
	struct jffs2_raw_inode n;
	struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL);
	int ret;

	if (!f)
		return -ENOMEM;

	mutex_init(&f->sem);
	mutex_lock(&f->sem);
	f->inocache = ic;

	ret = jffs2_do_read_inode_internal(c, f, &n);
	if (!ret) {
		mutex_unlock(&f->sem);
		jffs2_do_clear_inode(c, f);
	}
	jffs2_xattr_do_crccheck_inode(c, ic);
	kfree(f);
	return ret;
}

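/* Tear down the in-core representation of an inode: the metadata node,
   the fragtree, the cached symlink target and the dirent list. If the
   inode has been deleted (pino_nlink == 0), its nodes are also marked
   obsolete on the flash so the garbage collector can reclaim them. */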
void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
{
	struct jffs2_full_dirent *fd, *fds;
	int deleted;

	jffs2_xattr_delete_inode(c, f->inocache);
	mutex_lock(&f->sem);
	deleted = f->inocache && !f->inocache->pino_nlink;

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);

	if (f->metadata) {
		if (deleted)
			jffs2_mark_node_obsolete(c, f->metadata->raw);
		jffs2_free_full_dnode(f->metadata);
	}

	jffs2_kill_fragtree(&f->fragtree, deleted ? c : NULL);

	if (f->target) {
		kfree(f->target);
		f->target = NULL;
	}

	fds = f->dents;
	while (fds) {
		fd = fds;
		fds = fd->next;
		jffs2_free_full_dirent(fd);
	}

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		if (f->inocache->nodes == (void *)f->inocache)
			jffs2_del_ino_cache(c, f->inocache);
	}

	mutex_unlock(&f->sem);
}