/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/crc32.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include "nodelist.h"

/*
 * Check the data CRC of the node.
 *
 * Returns: 0 if the data CRC is correct;
 *          1 - if incorrect;
 *          error code if an error occurred.
 */
static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
        struct jffs2_raw_node_ref *ref = tn->fn->raw;
        int err = 0, pointed = 0;
        struct jffs2_eraseblock *jeb;
        unsigned char *buffer;
        uint32_t crc, ofs, len;
        size_t retlen;

        BUG_ON(tn->csize == 0);

        /* Calculate how many bytes were already checked */
        ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
        len = tn->csize;

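        /*
         * On writebuffered flash, the start of the data (up to the next
         * wbuf page boundary) was already folded into tn->partial_crc
         * when the node header was read -- see the long comment in
         * read_dnode() below -- so we can skip over it here.
         */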
        if (jffs2_is_writebuffered(c)) {
                int adj = ofs % c->wbuf_pagesize;
                if (likely(adj))
                        adj = c->wbuf_pagesize - adj;

                if (adj >= tn->csize) {
                        dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
                                      ref_offset(ref), tn->csize, ofs);
                        goto adj_acc;
                }

                ofs += adj;
                len -= adj;
        }

        dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
                      ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);

#ifndef __ECOS
        /* TODO: instead, encapsulate the point() stuff in jffs2_flash_read(),
         * adding a jffs2_flash_read_end() interface. */
        err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL);
        if (!err && retlen < len) {
                JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
                mtd_unpoint(c->mtd, ofs, retlen);
        } else if (err) {
                if (err != -EOPNOTSUPP)
                        JFFS2_WARNING("MTD point failed: error code %d.\n", err);
        } else
                pointed = 1; /* successfully pointed to device */
#endif

        if (!pointed) {
                buffer = kmalloc(len, GFP_KERNEL);
                if (unlikely(!buffer))
                        return -ENOMEM;

                /* TODO: this is a very frequent pattern; make it a
                 * separate routine */
                err = jffs2_flash_read(c, ofs, len, &retlen, buffer);
                if (err) {
                        JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
                        goto free_out;
                }

                if (retlen != len) {
                        JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
                        err = -EIO;
                        goto free_out;
                }
        }

        /* Continue calculating CRC */
        crc = crc32(tn->partial_crc, buffer, len);
        if (!pointed)
                kfree(buffer);
#ifndef __ECOS
        else
                mtd_unpoint(c->mtd, ofs, len);
#endif

        if (crc != tn->data_crc) {
                JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
                             ref_offset(ref), tn->data_crc, crc);
                return 1;
        }

adj_acc:
        jeb = &c->blocks[ref->flash_offset / c->sector_size];
        len = ref_totlen(c, jeb, ref);
        /* If it should be REF_NORMAL, it'll get marked as such when
           we build the fragtree, shortly. No need to worry about GC
           moving it while it's marked REF_PRISTINE -- GC won't happen
           till we've finished checking every inode anyway. */
        ref->flash_offset |= REF_PRISTINE;
        /*
         * Mark the node as having been checked and fix the
         * accounting accordingly.
         */
        spin_lock(&c->erase_completion_lock);
        jeb->used_size += len;
        jeb->unchecked_size -= len;
        c->used_size += len;
        c->unchecked_size -= len;
        jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
        spin_unlock(&c->erase_completion_lock);

        return 0;

free_out:
        if (!pointed)
                kfree(buffer);
#ifndef __ECOS
        else
                mtd_unpoint(c->mtd, ofs, len);
#endif
        return err;
}

/*
 * Helper function for jffs2_add_tn_to_tree() and jffs2_build_inode_fragtree().
 *
 * Checks the node if we are in the checking stage.
 */
static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
        int ret;

        BUG_ON(ref_obsolete(tn->fn->raw));

        /* We only check the data CRC of unchecked nodes */
        if (ref_flags(tn->fn->raw) != REF_UNCHECKED)
                return 0;

        dbg_readinode("check node %#04x-%#04x, phys offs %#08x\n",
                      tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw));

        ret = check_node_data(c, tn);
        if (unlikely(ret < 0)) {
                JFFS2_ERROR("check_node_data() returned error: %d.\n",
                            ret);
        } else if (unlikely(ret > 0)) {
                dbg_readinode("CRC error, mark it obsolete.\n");
                jffs2_mark_node_obsolete(c, tn->fn->raw);
        }

        return ret;
}

static struct jffs2_tmp_dnode_info *jffs2_lookup_tn(struct rb_root *tn_root, uint32_t offset)
{
        struct rb_node *next;
        struct jffs2_tmp_dnode_info *tn = NULL;

        dbg_readinode("root %p, offset %d\n", tn_root, offset);

        next = tn_root->rb_node;

        while (next) {
                tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb);

                if (tn->fn->ofs < offset)
                        next = tn->rb.rb_right;
                else if (tn->fn->ofs >= offset)
                        next = tn->rb.rb_left;
                else
                        break;
        }

        return tn;
}

static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
        jffs2_mark_node_obsolete(c, tn->fn->raw);
        jffs2_free_full_dnode(tn->fn);
        jffs2_free_tmp_dnode_info(tn);
}

/*
 * This function is used when we read an inode. Data nodes arrive in
 * arbitrary order -- they may be older or newer than the nodes which
 * are already in the tree. Where overlaps occur, the older node can
 * be discarded as long as the newer passes the CRC check. We don't
 * bother to keep track of holes in this rbtree, and neither do we deal
 * with frags -- we can have multiple entries starting at the same
 * offset, and the one with the smallest length will come first in the
 * ordering.
 *
 * Returns 0 if the node was handled (including marking it obsolete);
 *       < 0 if an error occurred.
 */
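/*
 * Note on ->overlapped: it is set on a node whose range intersects that
 * of a node earlier in the tree (at a lower offset).
 * jffs2_build_inode_fragtree() relies on it to decide which nodes must
 * be replayed in version order rather than simply eaten off the back
 * of the tree.
 */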
static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
                                struct jffs2_readinode_info *rii,
                                struct jffs2_tmp_dnode_info *tn)
{
        uint32_t fn_end = tn->fn->ofs + tn->fn->size;
        struct jffs2_tmp_dnode_info *this, *ptn;

        dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw));

        /* If a node has zero dsize, we only have to keep it if it might be the
           node with highest version -- i.e. the one which will end up as f->metadata.
           Note that such nodes won't be REF_UNCHECKED since there is no data to
           check anyway. */
        if (!tn->fn->size) {
                if (rii->mdata_tn) {
                        if (rii->mdata_tn->version < tn->version) {
                                /* We had a candidate mdata node already */
                                dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version);
                                jffs2_kill_tn(c, rii->mdata_tn);
                        } else {
                                dbg_readinode("kill new mdata with ver %d (older than existing %d)\n",
                                              tn->version, rii->mdata_tn->version);
                                jffs2_kill_tn(c, tn);
                                return 0;
                        }
                }
                rii->mdata_tn = tn;
                dbg_readinode("keep new mdata with ver %d\n", tn->version);
                return 0;
        }

        /* Find the earliest node which _may_ be relevant to this one */
        this = jffs2_lookup_tn(&rii->tn_root, tn->fn->ofs);
        if (this) {
                /* If the node is coincident with another at a lower address,
                   back up until the other node is found. It may be relevant */
                while (this->overlapped) {
                        ptn = tn_prev(this);
                        if (!ptn) {
                                /*
                                 * We killed a node which set the overlapped
                                 * flags during the scan. Fix it up.
                                 */
                                this->overlapped = 0;
                                break;
                        }
                        this = ptn;
                }
                dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");
        }

        while (this) {
                if (this->fn->ofs > fn_end)
                        break;
                dbg_readinode("Ponder this ver %d, 0x%x-0x%x\n",
                              this->version, this->fn->ofs, this->fn->size);

                if (this->version == tn->version) {
                        /* Version number collision means REF_PRISTINE GC. Accept either of them
                           as long as the CRC is correct. Check the one we have already... */
                        if (!check_tn_node(c, this)) {
                                /* The one we already had was OK. Keep it and throw away the new one */
                                dbg_readinode("Like old node. Throw away new\n");
                                jffs2_kill_tn(c, tn);
                                return 0;
                        } else {
                                /* Who cares if the new one is good; keep it for now anyway. */
                                dbg_readinode("Like new node. Throw away old\n");
                                rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
                                jffs2_kill_tn(c, this);
                                /* The new node takes over the same overlaps, in front and behind */
                                return 0;
                        }
                }
                if (this->version < tn->version &&
                    this->fn->ofs >= tn->fn->ofs &&
                    this->fn->ofs + this->fn->size <= fn_end) {
                        /* New node entirely overlaps 'this' */
                        if (check_tn_node(c, tn)) {
                                dbg_readinode("new node bad CRC\n");
                                jffs2_kill_tn(c, tn);
                                return 0;
                        }
                        /* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */
                        while (this && this->fn->ofs + this->fn->size <= fn_end) {
                                struct jffs2_tmp_dnode_info *next = tn_next(this);
                                if (this->version < tn->version) {
                                        tn_erase(this, &rii->tn_root);
                                        dbg_readinode("Kill overlapped ver %d, 0x%x-0x%x\n",
                                                      this->version, this->fn->ofs,
                                                      this->fn->ofs+this->fn->size);
                                        jffs2_kill_tn(c, this);
                                }
                                this = next;
                        }
                        dbg_readinode("Done killing overlapped nodes\n");
                        continue;
                }
                if (this->version > tn->version &&
                    this->fn->ofs <= tn->fn->ofs &&
                    this->fn->ofs+this->fn->size >= fn_end) {
                        /* New node entirely overlapped by 'this' */
                        if (!check_tn_node(c, this)) {
                                dbg_readinode("Good CRC on old node. Kill new\n");
                                jffs2_kill_tn(c, tn);
                                return 0;
                        }
                        /* ... but 'this' was bad. Replace it... */
                        dbg_readinode("Bad CRC on old overlapping node. Kill it\n");
                        tn_erase(this, &rii->tn_root);
                        jffs2_kill_tn(c, this);
                        break;
                }

                this = tn_next(this);
        }

        /* We neither completely obsoleted nor were completely
           obsoleted by an earlier node. Insert into the tree */
        {
                struct rb_node *parent;
                struct rb_node **link = &rii->tn_root.rb_node;
                struct jffs2_tmp_dnode_info *insert_point = NULL;

                while (*link) {
                        parent = *link;
                        insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
                        if (tn->fn->ofs > insert_point->fn->ofs)
                                link = &insert_point->rb.rb_right;
                        else if (tn->fn->ofs < insert_point->fn->ofs ||
                                 tn->fn->size < insert_point->fn->size)
                                link = &insert_point->rb.rb_left;
                        else
                                link = &insert_point->rb.rb_right;
                }
                rb_link_node(&tn->rb, &insert_point->rb, link);
                rb_insert_color(&tn->rb, &rii->tn_root);
        }

        /* If there's anything behind that overlaps us, note it */
        this = tn_prev(tn);
        if (this) {
                while (1) {
                        if (this->fn->ofs + this->fn->size > tn->fn->ofs) {
                                dbg_readinode("Node is overlapped by %p (v %d, 0x%x-0x%x)\n",
                                              this, this->version, this->fn->ofs,
                                              this->fn->ofs+this->fn->size);
                                tn->overlapped = 1;
                                break;
                        }
                        if (!this->overlapped)
                                break;

                        ptn = tn_prev(this);
                        if (!ptn) {
                                /*
                                 * We killed a node which set the overlapped
                                 * flags during the scan. Fix it up.
                                 */
                                this->overlapped = 0;
                                break;
                        }
                        this = ptn;
                }
        }

        /* If the new node overlaps anything ahead, note it */
        this = tn_next(tn);
        while (this && this->fn->ofs < fn_end) {
                this->overlapped = 1;
                dbg_readinode("Node ver %d, 0x%x-0x%x is overlapped\n",
                              this->version, this->fn->ofs,
                              this->fn->ofs+this->fn->size);
                this = tn_next(this);
        }
        return 0;
}

/* Trivial function to remove the last node in the tree. Which by definition
   has no right-hand child -- so it can be removed just by making its only
   child (if any) take its place under its parent. */
static void eat_last(struct rb_root *root, struct rb_node *node)
{
        struct rb_node *parent = rb_parent(node);
        struct rb_node **link;

        /* LAST! */
        BUG_ON(node->rb_right);

        if (!parent)
                link = &root->rb_node;
        else if (node == parent->rb_left)
                link = &parent->rb_left;
        else
                link = &parent->rb_right;

        *link = node->rb_left;
        /* Colour doesn't matter now. Only the parent pointer. */
        if (node->rb_left)
                node->rb_left->rb_parent_color = node->rb_parent_color;
}
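
/*
 * eat_last() deliberately skips rebalancing, so the tree stops being a
 * valid red-black tree once eating starts. That is safe here: the
 * callers below only consume a tree destructively, walking it with
 * tn_prev()/tn_last() (which need parent pointers, not colours), and
 * nothing is inserted into a tree once it has started being eaten.
 */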

/* We put this in reverse order, so we can just use eat_last */
static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn)
{
        struct rb_node **link = &ver_root->rb_node;
        struct rb_node *parent = NULL;
        struct jffs2_tmp_dnode_info *this_tn;

        while (*link) {
                parent = *link;
                this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);

                if (tn->version > this_tn->version)
                        link = &parent->rb_left;
                else
                        link = &parent->rb_right;
        }
        dbg_readinode("Link new node at %p (root is %p)\n", link, ver_root);
        rb_link_node(&tn->rb, parent, link);
        rb_insert_color(&tn->rb, ver_root);
}

/* Build the final, normal fragtree from the tn tree. It doesn't matter in
   which order we add nodes to the real fragtree, as long as they don't
   overlap. And having thrown away the majority of overlapped nodes as we
   went, there really shouldn't be many sets of nodes which do overlap. If we
   start at the end, we can use the overlap markers -- we can just eat nodes
   which aren't overlapped, and when we encounter nodes which _do_ overlap we
   sort them all into a temporary tree in version order before replaying them. */
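/*
 * In outline, the loop below does (a sketch of the flow, not a separate
 * algorithm):
 *
 *      for each tn in rii->tn_root, from highest offset down:
 *              eat it from tn_root and ver_insert() it into ver_root;
 *              if it was not overlapped, drain ver_root into the real
 *              fragtree in version order (oldest first, so newer nodes
 *              override), CRC-checking each node with check_tn_node()
 *              before adding it.
 */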
static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
                                      struct jffs2_inode_info *f,
                                      struct jffs2_readinode_info *rii)
{
        struct jffs2_tmp_dnode_info *pen, *last, *this;
        struct rb_root ver_root = RB_ROOT;
        uint32_t high_ver = 0;

        if (rii->mdata_tn) {
                dbg_readinode("potential mdata is ver %d at %p\n", rii->mdata_tn->version, rii->mdata_tn);
                high_ver = rii->mdata_tn->version;
                rii->latest_ref = rii->mdata_tn->fn->raw;
        }
#ifdef JFFS2_DBG_READINODE_MESSAGES
        this = tn_last(&rii->tn_root);
        while (this) {
                dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs,
                              this->fn->ofs+this->fn->size, this->overlapped);
                this = tn_prev(this);
        }
#endif
        pen = tn_last(&rii->tn_root);
        while ((last = pen)) {
                pen = tn_prev(last);

                eat_last(&rii->tn_root, &last->rb);
                ver_insert(&ver_root, last);

                if (unlikely(last->overlapped)) {
                        if (pen)
                                continue;
                        /*
                         * We killed a node which set the overlapped
                         * flags during the scan. Fix it up.
                         */
                        last->overlapped = 0;
                }

                /* Now we have a bunch of nodes in reverse version
                   order, in the tree at ver_root. Most of the time,
                   there'll actually be only one node in the 'tree',
                   in fact. */
                this = tn_last(&ver_root);

                while (this) {
                        struct jffs2_tmp_dnode_info *vers_next;
                        int ret;
                        vers_next = tn_prev(this);
                        eat_last(&ver_root, &this->rb);
                        if (check_tn_node(c, this)) {
                                dbg_readinode("node ver %d, 0x%x-0x%x failed CRC\n",
                                              this->version, this->fn->ofs,
                                              this->fn->ofs+this->fn->size);
                                jffs2_kill_tn(c, this);
                        } else {
                                if (this->version > high_ver) {
                                        /* Note that this is different from the other
                                           highest_version, because this one is only
                                           counting _valid_ nodes which could give the
                                           latest inode metadata */
                                        high_ver = this->version;
                                        rii->latest_ref = this->fn->raw;
                                }
                                dbg_readinode("Add %p (v %d, 0x%x-0x%x, ov %d) to fragtree\n",
                                              this, this->version, this->fn->ofs,
                                              this->fn->ofs+this->fn->size, this->overlapped);

                                ret = jffs2_add_full_dnode_to_inode(c, f, this->fn);
                                if (ret) {
                                        /* Free the nodes in ver_root; let the caller
                                           deal with the rest */
                                        JFFS2_ERROR("Add node to tree failed %d\n", ret);
                                        while (1) {
                                                vers_next = tn_prev(this);
                                                if (check_tn_node(c, this))
                                                        jffs2_mark_node_obsolete(c, this->fn->raw);
                                                jffs2_free_full_dnode(this->fn);
                                                jffs2_free_tmp_dnode_info(this);
                                                this = vers_next;
                                                if (!this)
                                                        break;
                                                eat_last(&ver_root, &vers_next->rb);
                                        }
                                        return ret;
                                }
                                jffs2_free_tmp_dnode_info(this);
                        }
                        this = vers_next;
                }
        }
        return 0;
}

static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
        struct rb_node *this;
        struct jffs2_tmp_dnode_info *tn;

        this = list->rb_node;

        /* Now at bottom of tree */
        while (this) {
                if (this->rb_left)
                        this = this->rb_left;
                else if (this->rb_right)
                        this = this->rb_right;
                else {
                        tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
                        jffs2_free_full_dnode(tn->fn);
                        jffs2_free_tmp_dnode_info(tn);

                        this = rb_parent(this);
                        if (!this)
                                break;

                        if (this->rb_left == &tn->rb)
                                this->rb_left = NULL;
                        else if (this->rb_right == &tn->rb)
                                this->rb_right = NULL;
                        else BUG();
                }
        }
        *list = RB_ROOT;
}

static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
{
        struct jffs2_full_dirent *next;

        while (fd) {
                next = fd->next;
                jffs2_free_full_dirent(fd);
                fd = next;
        }
}

/* Returns first valid node after 'ref'. May return 'ref' */
static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
        while (ref && ref->next_in_ino) {
                if (!ref_obsolete(ref))
                        return ref;
                dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
                ref = ref->next_in_ino;
        }
        return NULL;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
 *
 * Returns: 0 on success;
 *          negative error code on failure.
 */
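/*
 * 'read' is how many bytes of the raw dirent the caller has already
 * pulled into 'rd' -- the header plus, possibly, part of the name.
 */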
static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
                                struct jffs2_raw_dirent *rd, size_t read,
                                struct jffs2_readinode_info *rii)
{
        struct jffs2_full_dirent *fd;
        uint32_t crc;

        /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
        BUG_ON(ref_obsolete(ref));

        crc = crc32(0, rd, sizeof(*rd) - 8);
        if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
                JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
                             ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
                jffs2_mark_node_obsolete(c, ref);
                return 0;
        }

        /* If we've never checked the CRCs on this node, check them now */
        if (ref_flags(ref) == REF_UNCHECKED) {
                struct jffs2_eraseblock *jeb;
                int len;

                /* Sanity check */
                if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
                        JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
                                    ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
                        jffs2_mark_node_obsolete(c, ref);
                        return 0;
                }

                jeb = &c->blocks[ref->flash_offset / c->sector_size];
                len = ref_totlen(c, jeb, ref);

                spin_lock(&c->erase_completion_lock);
                jeb->used_size += len;
                jeb->unchecked_size -= len;
                c->used_size += len;
                c->unchecked_size -= len;
                ref->flash_offset = ref_offset(ref) | dirent_node_state(rd);
                spin_unlock(&c->erase_completion_lock);
        }

        fd = jffs2_alloc_full_dirent(rd->nsize + 1);
        if (unlikely(!fd))
                return -ENOMEM;

        fd->raw = ref;
        fd->version = je32_to_cpu(rd->version);
        fd->ino = je32_to_cpu(rd->ino);
        fd->type = rd->type;

        if (fd->version > rii->highest_version)
                rii->highest_version = fd->version;

        /* Pick out the mctime of the latest dirent */
        if (fd->version > rii->mctime_ver && je32_to_cpu(rd->mctime)) {
                rii->mctime_ver = fd->version;
                rii->latest_mctime = je32_to_cpu(rd->mctime);
        }

        /*
         * Copy as much of the name as possible from the raw
         * dirent we've already read from the flash.
         */
        if (read > sizeof(*rd))
                memcpy(&fd->name[0], &rd->name[0],
                       min_t(uint32_t, rd->nsize, (read - sizeof(*rd))));

        /* Do we need to copy any more of the name directly from the flash? */
        if (rd->nsize + sizeof(*rd) > read) {
                /* FIXME: point() */
                int err;
                int already = read - sizeof(*rd);

                err = jffs2_flash_read(c, (ref_offset(ref)) + read,
                                       rd->nsize - already, &read, &fd->name[already]);
                if (unlikely(read != rd->nsize - already) && likely(!err)) {
                        jffs2_free_full_dirent(fd);
                        return -EIO;
                }

                if (unlikely(err)) {
                        JFFS2_ERROR("read remainder of name: error %d\n", err);
                        jffs2_free_full_dirent(fd);
                        return -EIO;
                }
        }

        fd->nhash = full_name_hash(fd->name, rd->nsize);
        fd->next = NULL;
        fd->name[rd->nsize] = '\0';

        /*
         * Wheee. We now have a complete jffs2_full_dirent structure, with
         * the name in it and everything. Link it into the list
         */
        jffs2_add_fd_to_list(c, fd, &rii->fds);

        return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode node is found.
 *
 * Returns: 0 on success (possibly after marking a bad node obsolete);
 *          negative error code on failure.
 */
static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
                             struct jffs2_raw_inode *rd, int rdlen,
                             struct jffs2_readinode_info *rii)
{
        struct jffs2_tmp_dnode_info *tn;
        uint32_t len, csize;
        int ret = 0;
        uint32_t crc;

        /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
        BUG_ON(ref_obsolete(ref));

        crc = crc32(0, rd, sizeof(*rd) - 8);
        if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
                JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
                             ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
                jffs2_mark_node_obsolete(c, ref);
                return 0;
        }

        tn = jffs2_alloc_tmp_dnode_info();
        if (!tn) {
                JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
                return -ENOMEM;
        }

        tn->partial_crc = 0;
        csize = je32_to_cpu(rd->csize);

        /* If we've never checked the CRCs on this node, check them now */
        if (ref_flags(ref) == REF_UNCHECKED) {

                /* Sanity checks */
                if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
                    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
                        JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
                        jffs2_dbg_dump_node(c, ref_offset(ref));
                        jffs2_mark_node_obsolete(c, ref);
                        goto free_out;
                }

                if (jffs2_is_writebuffered(c) && csize != 0) {
                        /* At this point we are supposed to check the data CRC
                         * of our unchecked node. But thus far, we do not
                         * know whether the node is valid or obsolete. To
                         * figure this out, we need to walk all the nodes of
                         * the inode and build the inode fragtree. We don't
                         * want to spend time checking data of nodes which may
                         * later be found to be obsolete. So we put off the full
                         * data CRC checking until we have read all the inode
                         * nodes and have started building the fragtree.
                         *
                         * The fragtree is being built starting with nodes
                         * having the highest version number, so we'll be able
                         * to detect whether a node is valid (i.e., it is not
                         * overlapped by a node with higher version) or not.
                         * And we'll be able to check only those nodes which
                         * are not obsolete.
                         *
                         * Of course, this optimization only makes sense in case
                         * of NAND flashes (or other flashes with
                         * !jffs2_can_mark_obsolete()), since on NOR flashes
                         * nodes are marked obsolete physically.
                         *
                         * Since NAND flashes (or other flashes with
                         * jffs2_is_writebuffered(c)) are anyway read by
                         * fractions of c->wbuf_pagesize, and we have just read
                         * the node header, it is likely that the starting part
                         * of the node data is also read when we read the
                         * header. So we don't mind checking the CRC of the
                         * starting part of the data of the node now, and check
                         * the second part later (in check_node_data()).
                         * Of course, we will not need to re-read and re-check
                         * the NAND page which we have just read. This is why we
                         * read the whole NAND page at jffs2_get_inode_nodes(),
                         * while we needed only the node header.
                         */
                        unsigned char *buf;

                        /* 'buf' will point to the start of data */
                        buf = (unsigned char *)rd + sizeof(*rd);
                        /* len will be the read data length */
                        len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
                        tn->partial_crc = crc32(0, buf, len);

                        dbg_readinode("Calculated CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);

                        /* If we actually calculated the whole data CRC
                         * and it is wrong, drop the node. */
                        if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
                                JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
                                             ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
                                jffs2_mark_node_obsolete(c, ref);
                                goto free_out;
                        }

                } else if (csize == 0) {
                        /*
                         * We checked the header CRC. If the node has no data, adjust
                         * the space accounting now. For other nodes this will be done
                         * later either when the node is marked obsolete or when its
                         * data is checked.
                         */
                        struct jffs2_eraseblock *jeb;

                        dbg_readinode("the node has no data.\n");
                        jeb = &c->blocks[ref->flash_offset / c->sector_size];
                        len = ref_totlen(c, jeb, ref);

                        spin_lock(&c->erase_completion_lock);
                        jeb->used_size += len;
                        jeb->unchecked_size -= len;
                        c->used_size += len;
                        c->unchecked_size -= len;
                        ref->flash_offset = ref_offset(ref) | REF_NORMAL;
                        spin_unlock(&c->erase_completion_lock);
                }
        }

        tn->fn = jffs2_alloc_full_dnode();
        if (!tn->fn) {
                JFFS2_ERROR("alloc fn failed\n");
                ret = -ENOMEM;
                goto free_out;
        }

        tn->version = je32_to_cpu(rd->version);
        tn->fn->ofs = je32_to_cpu(rd->offset);
        tn->data_crc = je32_to_cpu(rd->data_crc);
        tn->csize = csize;
        tn->fn->raw = ref;
        tn->overlapped = 0;

        if (tn->version > rii->highest_version)
                rii->highest_version = tn->version;

        /* There was a bug where we wrote hole nodes out with
           csize/dsize swapped. Deal with it */
        if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
                tn->fn->size = csize;
        else // normal case...
                tn->fn->size = je32_to_cpu(rd->dsize);

        dbg_readinode2("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
                       ref_offset(ref), je32_to_cpu(rd->version),
                       je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);

        ret = jffs2_add_tn_to_tree(c, rii, tn);

        if (ret) {
                jffs2_free_full_dnode(tn->fn);
        free_out:
                jffs2_free_tmp_dnode_info(tn);
                return ret;
        }
#ifdef JFFS2_DBG_READINODE2_MESSAGES
        dbg_readinode2("After adding ver %d:\n", je32_to_cpu(rd->version));
        tn = tn_first(&rii->tn_root);
        while (tn) {
                dbg_readinode2("%p: v %d r 0x%x-0x%x ov %d\n",
                               tn, tn->version, tn->fn->ofs,
                               tn->fn->ofs+tn->fn->size, tn->overlapped);
                tn = tn_next(tn);
        }
#endif
        return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an unknown node is found.
 *
 * Returns: 0 on success;
 *          negative error code on failure.
 */
static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
{
        /* We don't mark unknown nodes as REF_UNCHECKED */
        if (ref_flags(ref) == REF_UNCHECKED) {
                JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n",
                            ref_offset(ref));
                JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n",
                            je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
                            je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc));
                jffs2_mark_node_obsolete(c, ref);
                return 0;
        }

        un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));

        switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {

        case JFFS2_FEATURE_INCOMPAT:
                JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
                            je16_to_cpu(un->nodetype), ref_offset(ref));
                /* EEP */
                BUG();
                break;

        case JFFS2_FEATURE_ROCOMPAT:
                JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
                            je16_to_cpu(un->nodetype), ref_offset(ref));
                BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
                break;

        case JFFS2_FEATURE_RWCOMPAT_COPY:
                JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
                             je16_to_cpu(un->nodetype), ref_offset(ref));
                break;

        case JFFS2_FEATURE_RWCOMPAT_DELETE:
                JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
                             je16_to_cpu(un->nodetype), ref_offset(ref));
                jffs2_mark_node_obsolete(c, ref);
                return 0;
        }

        return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * The function detects whether more data should be read and reads it if yes.
 *
 * Returns: 0 on success;
 *          negative error code on failure.
 */
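/*
 * On writebuffered flash the extra read is rounded up to a whole wbuf
 * page, mirroring the initial header read in jffs2_get_inode_nodes(),
 * so the same minimal I/O unit never has to be read twice.
 */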
static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
                     int needed_len, int *rdlen, unsigned char *buf)
{
        int err, to_read = needed_len - *rdlen;
        size_t retlen;
        uint32_t offs;

        if (jffs2_is_writebuffered(c)) {
                int rem = to_read % c->wbuf_pagesize;

                if (rem)
                        to_read += c->wbuf_pagesize - rem;
        }

        /* We need to read more data */
        offs = ref_offset(ref) + *rdlen;

        dbg_readinode("read more %d bytes\n", to_read);

        err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen);
        if (err) {
                JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n",
                            to_read, offs, err);
                return err;
        }

        if (retlen < to_read) {
                JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
                            offs, retlen, to_read);
                return -EIO;
        }

        *rdlen += to_read;
        return 0;
}

/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino. Perform a preliminary ordering on data nodes, throwing away
   those which are completely obsoleted by newer ones. The naïve approach of
   just returning them _all_ in version order would cause us to run out of
   memory in certain degenerate cases. */
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
                                 struct jffs2_readinode_info *rii)
{
        struct jffs2_raw_node_ref *ref, *valid_ref;
        unsigned char *buf = NULL;
        union jffs2_node_union *node;
        size_t retlen;
        int len, err;

        rii->mctime_ver = 0;

        dbg_readinode("ino #%u\n", f->inocache->ino);

        /* FIXME: in case of NOR and available ->point() this
         * needs to be fixed. */
        len = sizeof(union jffs2_node_union) + c->wbuf_pagesize;
        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        spin_lock(&c->erase_completion_lock);
        valid_ref = jffs2_first_valid_node(f->inocache->nodes);
        if (!valid_ref && f->inocache->ino != 1)
                JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
        while (valid_ref) {
                /* We can hold a pointer to a non-obsolete node without the spinlock,
                   but _obsolete_ nodes may disappear at any time, if the block
                   they're in gets erased. So if we mark 'ref' obsolete while we're
                   not holding the lock, it can go away immediately. For that reason,
                   we find the next valid node first, before processing 'ref'.
                */
                ref = valid_ref;
                valid_ref = jffs2_first_valid_node(ref->next_in_ino);
                spin_unlock(&c->erase_completion_lock);

                cond_resched();

                /*
                 * At this point we don't know the type of the node we're going
                 * to read, so we do not know the size of its header. In order
                 * to minimize the amount of flash IO we assume the header is
                 * of size = JFFS2_MIN_NODE_HEADER.
                 */
                len = JFFS2_MIN_NODE_HEADER;
                if (jffs2_is_writebuffered(c)) {
                        int end, rem;

                        /*
                         * We are about to read JFFS2_MIN_NODE_HEADER bytes,
                         * but this flash has some minimal I/O unit. It is
                         * possible that we'll need to read more soon, so read
                         * up to the next min. I/O unit, in order not to
                         * re-read the same min. I/O unit twice.
                         */
                        end = ref_offset(ref) + len;
                        rem = end % c->wbuf_pagesize;
                        if (rem)
                                end += c->wbuf_pagesize - rem;
                        len = end - ref_offset(ref);
                }

                dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

                /* FIXME: point() */
                err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf);
                if (err) {
                        JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ref_offset(ref), err);
                        goto free_out;
                }

                if (retlen < len) {
                        JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
                        err = -EIO;
                        goto free_out;
                }

                node = (union jffs2_node_union *)buf;

                /* No need to mask in the valid bit; it shouldn't be invalid */
                if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
                        JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
                                     ref_offset(ref), je16_to_cpu(node->u.magic),
                                     je16_to_cpu(node->u.nodetype),
                                     je32_to_cpu(node->u.totlen),
                                     je32_to_cpu(node->u.hdr_crc));
                        jffs2_dbg_dump_node(c, ref_offset(ref));
                        jffs2_mark_node_obsolete(c, ref);
                        goto cont;
                }
                if (je16_to_cpu(node->u.magic) != JFFS2_MAGIC_BITMASK) {
                        /* Not a JFFS2 node, whinge and move on */
                        JFFS2_NOTICE("Wrong magic bitmask 0x%04x in node header at %#08x.\n",
                                     je16_to_cpu(node->u.magic), ref_offset(ref));
                        jffs2_mark_node_obsolete(c, ref);
                        goto cont;
                }

                switch (je16_to_cpu(node->u.nodetype)) {

                case JFFS2_NODETYPE_DIRENT:

                        if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent) &&
                            len < sizeof(struct jffs2_raw_dirent)) {
                                err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf);
                                if (unlikely(err))
                                        goto free_out;
                        }

                        err = read_direntry(c, ref, &node->d, retlen, rii);
                        if (unlikely(err))
                                goto free_out;

                        break;

                case JFFS2_NODETYPE_INODE:

                        if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode) &&
                            len < sizeof(struct jffs2_raw_inode)) {
                                err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf);
                                if (unlikely(err))
                                        goto free_out;
                        }

                        err = read_dnode(c, ref, &node->i, len, rii);
                        if (unlikely(err))
                                goto free_out;

                        break;

                default:
                        if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node) &&
                            len < sizeof(struct jffs2_unknown_node)) {
                                err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf);
                                if (unlikely(err))
                                        goto free_out;
                        }

                        err = read_unknown(c, ref, &node->u);
                        if (unlikely(err))
                                goto free_out;

                }
        cont:
                spin_lock(&c->erase_completion_lock);
        }

        spin_unlock(&c->erase_completion_lock);
        kfree(buf);

        f->highest_version = rii->highest_version;

        dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
                      f->inocache->ino, rii->highest_version, rii->latest_mctime,
                      rii->mctime_ver);
        return 0;

 free_out:
        jffs2_free_tmp_dnode_info_list(&rii->tn_root);
        jffs2_free_full_dirent_list(rii->fds);
        rii->fds = NULL;
        kfree(buf);
        return err;
}

Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001140static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141 struct jffs2_inode_info *f,
1142 struct jffs2_raw_inode *latest_node)
1143{
David Woodhousedf8e96f2007-04-25 03:23:42 +01001144 struct jffs2_readinode_info rii;
David Woodhouse61c4b232007-04-25 17:04:23 +01001145 uint32_t crc, new_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146 size_t retlen;
1147 int ret;
1148
David Woodhouse27c72b02008-05-01 18:47:17 +01001149 dbg_readinode("ino #%u pino/nlink is %d\n", f->inocache->ino,
1150 f->inocache->pino_nlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001151
David Woodhousedf8e96f2007-04-25 03:23:42 +01001152 memset(&rii, 0, sizeof(rii));
1153
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 /* Grab all nodes relevant to this ino */
David Woodhousedf8e96f2007-04-25 03:23:42 +01001155 ret = jffs2_get_inode_nodes(c, f, &rii);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156
1157 if (ret) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001158 JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159 if (f->inocache->state == INO_STATE_READING)
1160 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1161 return ret;
1162 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163
David Woodhousedf8e96f2007-04-25 03:23:42 +01001164 ret = jffs2_build_inode_fragtree(c, f, &rii);
1165 if (ret) {
1166 JFFS2_ERROR("Failed to build final fragtree for inode #%u: error %d\n",
1167 f->inocache->ino, ret);
1168 if (f->inocache->state == INO_STATE_READING)
1169 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1170 jffs2_free_tmp_dnode_info_list(&rii.tn_root);
1171 /* FIXME: We could at least crc-check them all */
1172 if (rii.mdata_tn) {
1173 jffs2_free_full_dnode(rii.mdata_tn->fn);
1174 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1175 rii.mdata_tn = NULL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001176 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001177 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001179
1180 if (rii.mdata_tn) {
1181 if (rii.mdata_tn->fn->raw == rii.latest_ref) {
1182 f->metadata = rii.mdata_tn->fn;
1183 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1184 } else {
1185 jffs2_kill_tn(c, rii.mdata_tn);
1186 }
1187 rii.mdata_tn = NULL;
1188 }
1189
1190 f->dents = rii.fds;
1191
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +01001192 jffs2_dbg_fragtree_paranoia_check_nolock(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193
David Woodhousedf8e96f2007-04-25 03:23:42 +01001194 if (unlikely(!rii.latest_ref)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 /* No data nodes for this inode. */
1196 if (f->inocache->ino != 1) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001197 JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
David Woodhousedf8e96f2007-04-25 03:23:42 +01001198 if (!rii.fds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 if (f->inocache->state == INO_STATE_READING)
1200 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1201 return -EIO;
1202 }
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001203 JFFS2_NOTICE("but it has children so we fake some modes for it\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204 }
1205 latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
1206 latest_node->version = cpu_to_je32(0);
1207 latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
1208 latest_node->isize = cpu_to_je32(0);
1209 latest_node->gid = cpu_to_je16(0);
1210 latest_node->uid = cpu_to_je16(0);
1211 if (f->inocache->state == INO_STATE_READING)
1212 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
1213 return 0;
1214 }
1215
	ret = jffs2_flash_read(c, ref_offset(rii.latest_ref), sizeof(*latest_node), &retlen, (void *)latest_node);
	if (ret || retlen != sizeof(*latest_node)) {
		JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
			    ret, retlen, sizeof(*latest_node));
		/* FIXME: If this fails, there seems to be a memory leak. Find it. */
		mutex_unlock(&f->sem);
		jffs2_do_clear_inode(c, f);
		return ret ? ret : -EIO;
	}

	crc = crc32(0, latest_node, sizeof(*latest_node)-8);
	if (crc != je32_to_cpu(latest_node->node_crc)) {
		JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
			    f->inocache->ino, ref_offset(rii.latest_ref));
		mutex_unlock(&f->sem);
		jffs2_do_clear_inode(c, f);
		return -EIO;
	}

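	/*
	 * Per-filetype fixups: directories may inherit a newer mctime
	 * from their dirents, regular files have their fragtree
	 * truncated to isize, symlinks cache their target string in RAM,
	 * and device nodes keep their single data node as f->metadata.
	 */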
	switch (jemode_to_cpu(latest_node->mode) & S_IFMT) {
	case S_IFDIR:
		if (rii.mctime_ver > je32_to_cpu(latest_node->version)) {
			/* The times in the latest_node are actually older than
			   mctime in the latest dirent. Cheat. */
			latest_node->ctime = latest_node->mtime = cpu_to_je32(rii.latest_mctime);
		}
		break;

	case S_IFREG:
		/* If it was a regular file, truncate it to the latest node's isize */
		new_size = jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
		if (new_size != je32_to_cpu(latest_node->isize)) {
			JFFS2_WARNING("Truncating ino #%u to %d bytes failed because it only had %d bytes to start with!\n",
				      f->inocache->ino, je32_to_cpu(latest_node->isize), new_size);
			latest_node->isize = cpu_to_je32(new_size);
		}
		break;

	case S_IFLNK:
		/* Hack to work around broken isize in old symlink code.
		   Remove this when dwmw2 comes to his senses and stops
		   symlinks from being an entirely gratuitous special
		   case. */
		if (!je32_to_cpu(latest_node->isize))
			latest_node->isize = latest_node->dsize;

		if (f->inocache->state != INO_STATE_CHECKING) {
			/* Symlink's inode data is the target path. Read it and
			 * keep in RAM to facilitate quick follow symlink
			 * operation. */
			f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
			if (!f->target) {
				JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
				mutex_unlock(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ENOMEM;
			}

			ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node),
					       je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);

			if (ret || retlen != je32_to_cpu(latest_node->csize)) {
				if (retlen != je32_to_cpu(latest_node->csize))
					ret = -EIO;
				kfree(f->target);
				f->target = NULL;
				mutex_unlock(&f->sem);
				jffs2_do_clear_inode(c, f);
				return ret;
			}

			f->target[je32_to_cpu(latest_node->csize)] = '\0';
			dbg_readinode("symlink's target '%s' cached\n", f->target);
		}

		/* fall through... */

	case S_IFBLK:
	case S_IFCHR:
		/* Certain inode types should have only one data node, and it's
		   kept as the metadata node */
		if (f->metadata) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			mutex_unlock(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		if (!frag_first(&f->fragtree)) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			mutex_unlock(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* ASSERT: frag_first(&f->fragtree) != NULL */
		if (frag_next(frag_first(&f->fragtree))) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had more than one node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
			mutex_unlock(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* OK. We're happy */
		f->metadata = frag_first(&f->fragtree)->node;
		jffs2_free_node_frag(frag_first(&f->fragtree));
		f->fragtree = RB_ROOT;
		break;
	}
	if (f->inocache->state == INO_STATE_READING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);

	return 0;
}

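/*
 * Locking contract, inferred from the unlock calls on the error paths
 * above (a sketch, not documented API): the caller is expected to hold
 * f->sem on entry, roughly
 *
 *	mutex_lock(&f->sem);
 *	ret = jffs2_do_read_inode(c, f, ino, &latest_node);
 *
 * and on failure the lock has already been dropped and the inode torn
 * down via jffs2_do_clear_inode() before the error is returned.
 */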
/* Scan the list of all nodes present for this ino, build map of versions, etc. */
int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			uint32_t ino, struct jffs2_raw_inode *latest_node)
{
	dbg_readinode("read inode #%u\n", ino);

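	/*
	 * Claim the inocache for this ino. If another thread currently
	 * owns it (checking or GC), sleep until it is released and retry
	 * from the top.
	 */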
 retry_inocache:
	spin_lock(&c->inocache_lock);
	f->inocache = jffs2_get_ino_cache(c, ino);

	if (f->inocache) {
		/* Check its state. We may need to wait before we can use it */
		switch (f->inocache->state) {
		case INO_STATE_UNCHECKED:
		case INO_STATE_CHECKEDABSENT:
			f->inocache->state = INO_STATE_READING;
			break;

		case INO_STATE_CHECKING:
		case INO_STATE_GC:
			/* If it's in either of these states, we need
			   to wait for whoever's got it to finish and
			   put it back. */
			dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			goto retry_inocache;

		case INO_STATE_READING:
		case INO_STATE_PRESENT:
			/* Eep. This should never happen. It can
			   happen if Linux calls read_inode() again
			   before clear_inode() has finished though. */
			JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
			/* Fail. That's probably better than allowing it to succeed */
			f->inocache = NULL;
			break;

		default:
			BUG();
		}
	}
	spin_unlock(&c->inocache_lock);

	if (!f->inocache && ino == 1) {
		/* Special case - no root inode on medium */
		f->inocache = jffs2_alloc_inode_cache();
		if (!f->inocache) {
			JFFS2_ERROR("cannot allocate inocache for root inode\n");
			return -ENOMEM;
		}
		dbg_readinode("creating inocache for root inode\n");
		memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
		f->inocache->ino = f->inocache->pino_nlink = 1;
		f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
		f->inocache->state = INO_STATE_READING;
		jffs2_add_ino_cache(c, f->inocache);
	}
	if (!f->inocache) {
		JFFS2_ERROR("requested to read a nonexistent ino %u\n", ino);
		return -ENOENT;
	}

	return jffs2_do_read_inode_internal(c, f, latest_node);
}

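/*
 * CRC-check every node of an as-yet-unchecked inode by building a
 * throwaway in-core inode for it: jffs2_do_read_inode_internal() reads
 * and verifies all of its nodes as a side effect. On failure that
 * helper has already dropped f->sem and cleared the inode, which is why
 * only the success path unlocks and clears explicitly below.
 */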
int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
	struct jffs2_raw_inode n;
	struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL);
	int ret;

	if (!f)
		return -ENOMEM;

	mutex_init(&f->sem);
	mutex_lock(&f->sem);
	f->inocache = ic;

	ret = jffs2_do_read_inode_internal(c, f, &n);
	if (!ret) {
		mutex_unlock(&f->sem);
		jffs2_do_clear_inode(c, f);
	}
	kfree(f);
	return ret;
}

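/*
 * Tear down the in-core state of an inode: metadata node, fragtree,
 * cached symlink target and dirent list. When the inode has really
 * been deleted (pino_nlink == 0), the underlying flash nodes are marked
 * obsolete as they are freed.
 */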
void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
{
	struct jffs2_full_dirent *fd, *fds;
	int deleted;

	jffs2_xattr_delete_inode(c, f->inocache);
	mutex_lock(&f->sem);
	deleted = f->inocache && !f->inocache->pino_nlink;

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);

	if (f->metadata) {
		if (deleted)
			jffs2_mark_node_obsolete(c, f->metadata->raw);
		jffs2_free_full_dnode(f->metadata);
	}

	jffs2_kill_fragtree(&f->fragtree, deleted ? c : NULL);

	if (f->target) {
		kfree(f->target);
		f->target = NULL;
	}

	fds = f->dents;
	while (fds) {
		fd = fds;
		fds = fd->next;
		jffs2_free_full_dirent(fd);
	}

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		if (f->inocache->nodes == (void *)f->inocache)
			jffs2_del_ino_cache(c, f->inocache);
	}

	mutex_unlock(&f->sem);
}