blob: b0645ac7769ab3bb6aee7b94f9fccb66440c04a5 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
David Woodhousec00c3102007-04-25 14:16:47 +01004 * Copyright © 2001-2007 Red Hat, Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 *
6 * Created by David Woodhouse <dwmw2@infradead.org>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070010 */
11
12#include <linux/kernel.h>
Andrew Lunn737b7662005-07-30 16:29:30 +010013#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/slab.h>
15#include <linux/fs.h>
16#include <linux/crc32.h>
17#include <linux/pagemap.h>
18#include <linux/mtd/mtd.h>
19#include <linux/compiler.h>
20#include "nodelist.h"
21
/*
 * Check the data CRC of the node.
 *
 * Returns: 0 if the data CRC is correct;
 *	    1 - if incorrect;
 *	    error code if an error occurred.
 */
David Woodhousedf8e96f2007-04-25 03:23:42 +010029static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
Linus Torvalds1da177e2005-04-16 15:20:36 -070030{
David Woodhousedf8e96f2007-04-25 03:23:42 +010031 struct jffs2_raw_node_ref *ref = tn->fn->raw;
32 int err = 0, pointed = 0;
33 struct jffs2_eraseblock *jeb;
34 unsigned char *buffer;
35 uint32_t crc, ofs, len;
36 size_t retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
David Woodhousedf8e96f2007-04-25 03:23:42 +010038 BUG_ON(tn->csize == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
David Woodhousedf8e96f2007-04-25 03:23:42 +010040 if (!jffs2_is_writebuffered(c))
41 goto adj_acc;
42
43 /* Calculate how many bytes were already checked */
44 ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
45 len = ofs % c->wbuf_pagesize;
46 if (likely(len))
47 len = c->wbuf_pagesize - len;
48
49 if (len >= tn->csize) {
50 dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
51 ref_offset(ref), tn->csize, ofs);
52 goto adj_acc;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010053 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070054
David Woodhousedf8e96f2007-04-25 03:23:42 +010055 ofs += len;
56 len = tn->csize - len;
57
58 dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
59 ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);
60
61#ifndef __ECOS
62 /* TODO: instead, incapsulate point() stuff to jffs2_flash_read(),
63 * adding and jffs2_flash_read_end() interface. */
64 if (c->mtd->point) {
65 err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
66 if (!err && retlen < tn->csize) {
67 JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
68 c->mtd->unpoint(c->mtd, buffer, ofs, len);
69 } else if (err)
70 JFFS2_WARNING("MTD point failed: error code %d.\n", err);
71 else
72 pointed = 1; /* succefully pointed to device */
73 }
74#endif
75
76 if (!pointed) {
77 buffer = kmalloc(len, GFP_KERNEL);
78 if (unlikely(!buffer))
79 return -ENOMEM;
80
81 /* TODO: this is very frequent pattern, make it a separate
82 * routine */
83 err = jffs2_flash_read(c, ofs, len, &retlen, buffer);
84 if (err) {
85 JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
86 goto free_out;
87 }
88
89 if (retlen != len) {
90 JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
91 err = -EIO;
92 goto free_out;
93 }
94 }
95
96 /* Continue calculating CRC */
97 crc = crc32(tn->partial_crc, buffer, len);
98 if(!pointed)
99 kfree(buffer);
100#ifndef __ECOS
101 else
102 c->mtd->unpoint(c->mtd, buffer, ofs, len);
103#endif
104
105 if (crc != tn->data_crc) {
106 JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
107 ofs, tn->data_crc, crc);
108 return 1;
109 }
110
111adj_acc:
112 jeb = &c->blocks[ref->flash_offset / c->sector_size];
113 len = ref_totlen(c, jeb, ref);
114 /* If it should be REF_NORMAL, it'll get marked as such when
115 we build the fragtree, shortly. No need to worry about GC
116 moving it while it's marked REF_PRISTINE -- GC won't happen
117 till we've finished checking every inode anyway. */
118 ref->flash_offset |= REF_PRISTINE;
119 /*
120 * Mark the node as having been checked and fix the
121 * accounting accordingly.
122 */
123 spin_lock(&c->erase_completion_lock);
124 jeb->used_size += len;
125 jeb->unchecked_size -= len;
126 c->used_size += len;
127 c->unchecked_size -= len;
128 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
129 spin_unlock(&c->erase_completion_lock);
130
131 return 0;
132
133free_out:
134 if(!pointed)
135 kfree(buffer);
136#ifndef __ECOS
137 else
138 c->mtd->unpoint(c->mtd, buffer, ofs, len);
139#endif
140 return err;
141}
142
/*
 * Helper used by jffs2_add_tn_to_tree() and jffs2_build_inode_fragtree().
 *
 * Checks the node's data CRC if we are in the checking stage.
 */
148static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
149{
150 int ret;
151
152 BUG_ON(ref_obsolete(tn->fn->raw));
153
154 /* We only check the data CRC of unchecked nodes */
155 if (ref_flags(tn->fn->raw) != REF_UNCHECKED)
156 return 0;
157
158 dbg_readinode("check node %#04x-%#04x, phys offs %#08x\n",
159 tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw));
160
161 ret = check_node_data(c, tn);
162 if (unlikely(ret < 0)) {
163 JFFS2_ERROR("check_node_data() returned error: %d.\n",
164 ret);
165 } else if (unlikely(ret > 0)) {
166 dbg_readinode("CRC error, mark it obsolete.\n");
167 jffs2_mark_node_obsolete(c, tn->fn->raw);
168 }
169
170 return ret;
171}
172
173static struct jffs2_tmp_dnode_info *jffs2_lookup_tn(struct rb_root *tn_root, uint32_t offset)
174{
175 struct rb_node *next;
176 struct jffs2_tmp_dnode_info *tn = NULL;
177
178 dbg_readinode("root %p, offset %d\n", tn_root, offset);
179
180 next = tn_root->rb_node;
181
182 while (next) {
183 tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb);
184
185 if (tn->fn->ofs < offset)
186 next = tn->rb.rb_right;
187 else if (tn->fn->ofs >= offset)
188 next = tn->rb.rb_left;
189 else
190 break;
191 }
192
193 return tn;
194}
195
196
/* Discard a tmp_dnode_info which lost out to another node: mark its
 * on-flash node obsolete and release the in-core structures. */
static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	jffs2_mark_node_obsolete(c, tn->fn->raw);
	jffs2_free_full_dnode(tn->fn);
	jffs2_free_tmp_dnode_info(tn);
}
/*
 * This function is used when we read an inode. Data nodes arrive in
 * arbitrary order -- they may be older or newer than the nodes which
 * are already in the tree. Where overlaps occur, the older node can
 * be discarded as long as the newer passes the CRC check. We don't
 * bother to keep track of holes in this rbtree, and neither do we deal
 * with frags -- we can have multiple entries starting at the same
 * offset, and the one with the smallest length will come first in the
 * ordering.
 *
 * Returns 0 if the node was inserted
 *         1 if the node is obsolete (because we can't mark it so yet)
 *         < 0 if an error occurred
 */
static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
				struct jffs2_readinode_info *rii,
				struct jffs2_tmp_dnode_info *tn)
{
	uint32_t fn_end = tn->fn->ofs + tn->fn->size;
	struct jffs2_tmp_dnode_info *insert_point = NULL, *this;

	dbg_readinode("insert fragment %#04x-%#04x, ver %u\n", tn->fn->ofs, fn_end, tn->version);

	/* If a node has zero dsize, we only have to keep it if it might be the
	   node with highest version -- i.e. the one which will end up as f->metadata.
	   Note that such nodes won't be REF_UNCHECKED since there are no data to
	   check anyway. */
	if (!tn->fn->size) {
		if (rii->mdata_tn) {
			/* We had a candidate mdata node already */
			dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version);
			jffs2_kill_tn(c, rii->mdata_tn);
		}
		rii->mdata_tn = tn;
		dbg_readinode("keep new mdata with ver %d\n", tn->version);
		return 0;
	}

	/* Find the earliest node which _may_ be relevant to this one */
	this = jffs2_lookup_tn(&rii->tn_root, tn->fn->ofs);
	if (!this) {
		/* First addition to empty tree. $DEITY how I love the easy cases */
		rb_link_node(&tn->rb, NULL, &rii->tn_root.rb_node);
		rb_insert_color(&tn->rb, &rii->tn_root);
		dbg_readinode("keep new frag\n");
		return 0;
	}

	/* If we add a new node it'll be somewhere under here. */
	insert_point = this;

	/* If the node is coincident with another at a lower address,
	   back up until the other node is found. It may be relevant */
	/* NOTE(review): this loop walks 'tn' -- the node being inserted,
	   whose 'overlapped' flag is initialised to 0 by read_dnode() --
	   rather than 'this', so it appears to be a no-op as written.
	   Later upstream code backs up over 'this' instead; confirm
	   before changing, as the surrounding logic is order-sensitive. */
	while (tn->overlapped)
		tn = tn_prev(tn);

	/* NOTE(review): this->fn is dereferenced before the 'this->fn ?'
	   test below, so the "hole" arm looks vestigial. */
	dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");

	/* Scan forward from 'this' over every node whose range could
	   interact with the new one, resolving version conflicts and
	   total overlaps as we go. */
	while (this) {
		if (this->fn->ofs > fn_end)
			break;
		dbg_readinode("Ponder this ver %d, 0x%x-0x%x\n",
			      this->version, this->fn->ofs, this->fn->size);

		if (this->version == tn->version) {
			/* Version number collision means REF_PRISTINE GC. Accept either of them
			   as long as the CRC is correct. Check the one we have already... */
			if (!check_tn_node(c, this)) {
				/* The one we already had was OK. Keep it and throw away the new one */
				dbg_readinode("Like old node. Throw away new\n");
				jffs2_kill_tn(c, tn);
				return 0;
			} else {
				/* Who cares if the new one is good; keep it for now anyway. */
				rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
				/* Same overlapping from in front and behind */
				jffs2_kill_tn(c, this);
				dbg_readinode("Like new node. Throw away old\n");
				goto calc_overlaps;
			}
		}
		if (this->version < tn->version &&
		    this->fn->ofs >= tn->fn->ofs &&
		    this->fn->ofs + this->fn->size <= fn_end) {
			/* New node entirely overlaps 'this' */
			if (check_tn_node(c, tn)) {
				dbg_readinode("new node bad CRC\n");
				jffs2_kill_tn(c, tn);
				return 0;
			}
			/* ... and is good. Kill 'this'... */
			rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
			jffs2_kill_tn(c, this);
			/* ... and any subsequent nodes which are also overlapped */
			this = tn_next(tn);
			while (this && this->fn->ofs + this->fn->size < fn_end) {
				struct jffs2_tmp_dnode_info *next = tn_next(this);
				if (this->version < tn->version) {
					tn_erase(this, &rii->tn_root);
					dbg_readinode("Kill overlapped ver %d, 0x%x-0x%x\n",
						      this->version, this->fn->ofs,
						      this->fn->ofs+this->fn->size);
					jffs2_kill_tn(c, this);
				}
				this = next;
			}
			dbg_readinode("Done inserting new\n");
			goto calc_overlaps;
		}
		if (this->version > tn->version &&
		    this->fn->ofs <= tn->fn->ofs &&
		    this->fn->ofs+this->fn->size >= fn_end) {
			/* New node entirely overlapped by 'this' */
			if (!check_tn_node(c, this)) {
				dbg_readinode("Good CRC on old node. Kill new\n");
				jffs2_kill_tn(c, tn);
				return 0;
			}
			/* ... but 'this' was bad. Replace it... */
			tn->overlapped = this->overlapped;
			rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
			dbg_readinode("Bad CRC on old overlapping node. Kill it\n");
			jffs2_kill_tn(c, this);
			return 0;
		}
		/* We want to be inserted under the last node which is
		   either at a lower offset _or_ has a smaller range */
		if (this->fn->ofs < tn->fn->ofs ||
		    (this->fn->ofs == tn->fn->ofs &&
		     this->fn->size <= tn->fn->size))
			insert_point = this;

		this = tn_next(this);
	}
	dbg_readinode("insert_point %p, ver %d, 0x%x-0x%x, ov %d\n",
		      insert_point, insert_point->version, insert_point->fn->ofs,
		      insert_point->fn->ofs+insert_point->fn->size,
		      insert_point->overlapped);
	/* We neither completely obsoleted nor were completely
	   obsoleted by an earlier node. Insert under insert_point */
	{
		struct rb_node *parent = &insert_point->rb;
		struct rb_node **link = &parent;

		while (*link) {
			parent = *link;
			insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
			if (tn->fn->ofs > insert_point->fn->ofs)
				link = &insert_point->rb.rb_right;
			else if (tn->fn->ofs < insert_point->fn->ofs ||
				 tn->fn->size < insert_point->fn->size)
				link = &insert_point->rb.rb_left;
			else
				link = &insert_point->rb.rb_right;
		}
		rb_link_node(&tn->rb, &insert_point->rb, link);
		rb_insert_color(&tn->rb, &rii->tn_root);
	}

 calc_overlaps:
	/* If there's anything behind that overlaps us, note it */
	this = tn_prev(tn);
	if (this) {
		while (1) {
			if (this->fn->ofs + this->fn->size > tn->fn->ofs) {
				dbg_readinode("Node is overlapped by %p (v %d, 0x%x-0x%x)\n",
					      this, this->version, this->fn->ofs,
					      this->fn->ofs+this->fn->size);
				tn->overlapped = 1;
				break;
			}
			if (!this->overlapped)
				break;
			this = tn_prev(this);
		}
	}

	/* If the new node overlaps anything ahead, note it */
	this = tn_next(tn);
	while (this && this->fn->ofs < fn_end) {
		this->overlapped = 1;
		dbg_readinode("Node ver %d, 0x%x-0x%x is overlapped\n",
			      this->version, this->fn->ofs,
			      this->fn->ofs+this->fn->size);
		this = tn_next(this);
	}
	return 0;
}
391
392/* Trivial function to remove the last node in the tree. Which by definition
393 has no right-hand -- so can be removed just by making its only child (if
394 any) take its place under its parent. */
395static void eat_last(struct rb_root *root, struct rb_node *node)
396{
397 struct rb_node *parent = rb_parent(node);
398 struct rb_node **link;
399
400 /* LAST! */
401 BUG_ON(node->rb_right);
402
403 if (!parent)
404 link = &root->rb_node;
405 else if (node == parent->rb_left)
406 link = &parent->rb_left;
407 else
408 link = &parent->rb_right;
409
410 *link = node->rb_left;
411 /* Colour doesn't matter now. Only the parent pointer. */
412 if (node->rb_left)
413 node->rb_left->rb_parent_color = node->rb_parent_color;
414}
415
416/* We put this in reverse order, so we can just use eat_last */
417static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn)
418{
419 struct rb_node **link = &ver_root->rb_node;
420 struct rb_node *parent = NULL;
421 struct jffs2_tmp_dnode_info *this_tn;
422
423 while (*link) {
424 parent = *link;
425 this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
426
427 if (tn->version > this_tn->version)
428 link = &parent->rb_left;
429 else
430 link = &parent->rb_right;
431 }
432 dbg_readinode("Link new node at %p (root is %p)\n", link, ver_root);
433 rb_link_node(&tn->rb, parent, link);
434 rb_insert_color(&tn->rb, ver_root);
435}
436
/* Build final, normal fragtree from tn tree. It doesn't matter which order
   we add nodes to the real fragtree, as long as they don't overlap. And
   having thrown away the majority of overlapped nodes as we went, there
   really shouldn't be many sets of nodes which do overlap. If we start at
   the end, we can use the overlap markers -- we can just eat nodes which
   aren't overlapped, and when we encounter nodes which _do_ overlap we
   sort them all into a temporary tree in version order before replaying them.

   Returns 0 on success, or the negative error from
   jffs2_add_full_dnode_to_inode() after freeing the pending nodes. */
static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
				      struct jffs2_inode_info *f,
				      struct jffs2_readinode_info *rii)
{
	struct jffs2_tmp_dnode_info *pen, *last, *this;
	struct rb_root ver_root = RB_ROOT;
	uint32_t high_ver = 0;

	/* Seed the "latest valid node" tracking from the mdata candidate */
	if (rii->mdata_tn) {
		dbg_readinode("potential mdata is ver %d at %p\n", rii->mdata_tn->version, rii->mdata_tn);
		high_ver = rii->mdata_tn->version;
		rii->latest_ref = rii->mdata_tn->fn->raw;
	}
#ifdef JFFS2_DBG_READINODE_MESSAGES
	this = tn_last(&rii->tn_root);
	while (this) {
		dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs,
			      this->fn->ofs+this->fn->size, this->overlapped);
		this = tn_prev(this);
	}
#endif
	/* Consume the tn tree from the end; accumulate each run of
	   mutually-overlapping nodes into ver_root, then flush that run
	   once a non-overlapped node ends it. */
	pen = tn_last(&rii->tn_root);
	while ((last = pen)) {
		pen = tn_prev(last);

		eat_last(&rii->tn_root, &last->rb);
		ver_insert(&ver_root, last);

		if (unlikely(last->overlapped))
			continue;

		/* Now we have a bunch of nodes in reverse version
		   order, in the tree at ver_root. Most of the time,
		   there'll actually be only one node in the 'tree',
		   in fact. */
		this = tn_last(&ver_root);

		while (this) {
			struct jffs2_tmp_dnode_info *vers_next;
			int ret;
			vers_next = tn_prev(this);
			eat_last(&ver_root, &this->rb);
			if (check_tn_node(c, this)) {
				dbg_readinode("node ver %d, 0x%x-0x%x failed CRC\n",
					     this->version, this->fn->ofs,
					     this->fn->ofs+this->fn->size);
				jffs2_kill_tn(c, this);
			} else {
				if (this->version > high_ver) {
					/* Note that this is different from the other
					   highest_version, because this one is only
					   counting _valid_ nodes which could give the
					   latest inode metadata */
					high_ver = this->version;
					rii->latest_ref = this->fn->raw;
				}
				dbg_readinode("Add %p (v %d, 0x%x-0x%x, ov %d) to fragtree\n",
					     this, this->version, this->fn->ofs,
					     this->fn->ofs+this->fn->size, this->overlapped);

				ret = jffs2_add_full_dnode_to_inode(c, f, this->fn);
				if (ret) {
					/* Free the nodes in vers_root; let the caller
					   deal with the rest */
					JFFS2_ERROR("Add node to tree failed %d\n", ret);
					while (1) {
						vers_next = tn_prev(this);
						if (check_tn_node(c, this))
							jffs2_mark_node_obsolete(c, this->fn->raw);
						jffs2_free_full_dnode(this->fn);
						jffs2_free_tmp_dnode_info(this);
						this = vers_next;
						if (!this)
							break;
						eat_last(&ver_root, &vers_next->rb);
					}
					return ret;
				}
				jffs2_free_tmp_dnode_info(this);
			}
			this = vers_next;
		}
	}
	return 0;
}
529
530static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
531{
532 struct rb_node *this;
533 struct jffs2_tmp_dnode_info *tn;
534
535 this = list->rb_node;
536
537 /* Now at bottom of tree */
538 while (this) {
539 if (this->rb_left)
540 this = this->rb_left;
541 else if (this->rb_right)
542 this = this->rb_right;
543 else {
544 tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
545 jffs2_free_full_dnode(tn->fn);
546 jffs2_free_tmp_dnode_info(tn);
547
David Woodhouse21f1d5f2006-04-21 13:17:57 +0100548 this = rb_parent(this);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100549 if (!this)
550 break;
551
552 if (this->rb_left == &tn->rb)
553 this->rb_left = NULL;
554 else if (this->rb_right == &tn->rb)
555 this->rb_right = NULL;
556 else BUG();
557 }
558 }
559 list->rb_node = NULL;
560}
561
562static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
563{
564 struct jffs2_full_dirent *next;
565
566 while (fd) {
567 next = fd->next;
568 jffs2_free_full_dirent(fd);
569 fd = next;
570 }
571}
572
573/* Returns first valid node after 'ref'. May return 'ref' */
574static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
575{
576 while (ref && ref->next_in_ino) {
577 if (!ref_obsolete(ref))
578 return ref;
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100579 dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100580 ref = ref->next_in_ino;
581 }
582 return NULL;
583}
584
/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100593static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
David Woodhousedf8e96f2007-04-25 03:23:42 +0100594 struct jffs2_raw_dirent *rd, size_t read,
595 struct jffs2_readinode_info *rii)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100596{
597 struct jffs2_full_dirent *fd;
David Woodhouse1046d882006-06-18 22:44:21 +0100598 uint32_t crc;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000599
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100600 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
601 BUG_ON(ref_obsolete(ref));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000602
David Woodhouse1046d882006-06-18 22:44:21 +0100603 crc = crc32(0, rd, sizeof(*rd) - 8);
604 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
605 JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
606 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
David Woodhousedf8e96f2007-04-25 03:23:42 +0100607 jffs2_mark_node_obsolete(c, ref);
608 return 0;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100609 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000610
David Woodhouse1046d882006-06-18 22:44:21 +0100611 /* If we've never checked the CRCs on this node, check them now */
612 if (ref_flags(ref) == REF_UNCHECKED) {
613 struct jffs2_eraseblock *jeb;
614 int len;
615
616 /* Sanity check */
617 if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
618 JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
619 ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
David Woodhousedf8e96f2007-04-25 03:23:42 +0100620 jffs2_mark_node_obsolete(c, ref);
621 return 0;
David Woodhouse1046d882006-06-18 22:44:21 +0100622 }
623
624 jeb = &c->blocks[ref->flash_offset / c->sector_size];
625 len = ref_totlen(c, jeb, ref);
626
627 spin_lock(&c->erase_completion_lock);
628 jeb->used_size += len;
629 jeb->unchecked_size -= len;
630 c->used_size += len;
631 c->unchecked_size -= len;
632 ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
633 spin_unlock(&c->erase_completion_lock);
634 }
635
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100636 fd = jffs2_alloc_full_dirent(rd->nsize + 1);
637 if (unlikely(!fd))
638 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100640 fd->raw = ref;
641 fd->version = je32_to_cpu(rd->version);
642 fd->ino = je32_to_cpu(rd->ino);
643 fd->type = rd->type;
644
David Woodhousedf8e96f2007-04-25 03:23:42 +0100645 if (fd->version > rii->highest_version)
646 rii->highest_version = fd->version;
647
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100648 /* Pick out the mctime of the latest dirent */
David Woodhousedf8e96f2007-04-25 03:23:42 +0100649 if(fd->version > rii->mctime_ver && je32_to_cpu(rd->mctime)) {
650 rii->mctime_ver = fd->version;
651 rii->latest_mctime = je32_to_cpu(rd->mctime);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100652 }
653
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000654 /*
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100655 * Copy as much of the name as possible from the raw
656 * dirent we've already read from the flash.
657 */
658 if (read > sizeof(*rd))
659 memcpy(&fd->name[0], &rd->name[0],
660 min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000661
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100662 /* Do we need to copy any more of the name directly from the flash? */
663 if (rd->nsize + sizeof(*rd) > read) {
664 /* FIXME: point() */
665 int err;
666 int already = read - sizeof(*rd);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000667
668 err = jffs2_flash_read(c, (ref_offset(ref)) + read,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100669 rd->nsize - already, &read, &fd->name[already]);
670 if (unlikely(read != rd->nsize - already) && likely(!err))
671 return -EIO;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000672
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100673 if (unlikely(err)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100674 JFFS2_ERROR("read remainder of name: error %d\n", err);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100675 jffs2_free_full_dirent(fd);
676 return -EIO;
677 }
678 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000679
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100680 fd->nhash = full_name_hash(fd->name, rd->nsize);
681 fd->next = NULL;
682 fd->name[rd->nsize] = '\0';
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000683
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100684 /*
685 * Wheee. We now have a complete jffs2_full_dirent structure, with
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000686 * the name in it and everything. Link it into the list
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100687 */
David Woodhousedf8e96f2007-04-25 03:23:42 +0100688 jffs2_add_fd_to_list(c, fd, &rii->fds);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100689
690 return 0;
691}
692
693/*
694 * Helper function for jffs2_get_inode_nodes().
695 * It is called every time an inode node is found.
696 *
David Woodhousedf8e96f2007-04-25 03:23:42 +0100697 * Returns: 0 on success;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100698 * 1 if the node should be marked obsolete;
699 * negative error code on failure.
700 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100701static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
David Woodhousedf8e96f2007-04-25 03:23:42 +0100702 struct jffs2_raw_inode *rd, int rdlen,
703 struct jffs2_readinode_info *rii)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100704{
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100705 struct jffs2_tmp_dnode_info *tn;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100706 uint32_t len, csize;
707 int ret = 1;
David Woodhouse1046d882006-06-18 22:44:21 +0100708 uint32_t crc;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000709
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100710 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
711 BUG_ON(ref_obsolete(ref));
712
David Woodhouse1046d882006-06-18 22:44:21 +0100713 crc = crc32(0, rd, sizeof(*rd) - 8);
714 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
715 JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
716 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
David Woodhousedf8e96f2007-04-25 03:23:42 +0100717 jffs2_mark_node_obsolete(c, ref);
718 return 0;
David Woodhouse1046d882006-06-18 22:44:21 +0100719 }
720
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100721 tn = jffs2_alloc_tmp_dnode_info();
722 if (!tn) {
Randy Dunlapfb6a82c2006-04-11 20:12:10 -0400723 JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100724 return -ENOMEM;
725 }
726
727 tn->partial_crc = 0;
728 csize = je32_to_cpu(rd->csize);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000729
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100730 /* If we've never checked the CRCs on this node, check them now */
731 if (ref_flags(ref) == REF_UNCHECKED) {
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000732
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100733 /* Sanity checks */
734 if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
735 unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100736 JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
Andrew Lunn737b7662005-07-30 16:29:30 +0100737 jffs2_dbg_dump_node(c, ref_offset(ref));
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100738 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100739 }
740
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100741 if (jffs2_is_writebuffered(c) && csize != 0) {
742 /* At this point we are supposed to check the data CRC
743 * of our unchecked node. But thus far, we do not
744 * know whether the node is valid or obsolete. To
745 * figure this out, we need to walk all the nodes of
746 * the inode and build the inode fragtree. We don't
747 * want to spend time checking data of nodes which may
748 * later be found to be obsolete. So we put off the full
749 * data CRC checking until we have read all the inode
750 * nodes and have started building the fragtree.
751 *
752 * The fragtree is being built starting with nodes
753 * having the highest version number, so we'll be able
754 * to detect whether a node is valid (i.e., it is not
755 * overlapped by a node with higher version) or not.
756 * And we'll be able to check only those nodes, which
757 * are not obsolete.
758 *
759 * Of course, this optimization only makes sense in case
760 * of NAND flashes (or other flashes whith
761 * !jffs2_can_mark_obsolete()), since on NOR flashes
762 * nodes are marked obsolete physically.
763 *
764 * Since NAND flashes (or other flashes with
765 * jffs2_is_writebuffered(c)) are anyway read by
766 * fractions of c->wbuf_pagesize, and we have just read
767 * the node header, it is likely that the starting part
768 * of the node data is also read when we read the
769 * header. So we don't mind to check the CRC of the
770 * starting part of the data of the node now, and check
771 * the second part later (in jffs2_check_node_data()).
772 * Of course, we will not need to re-read and re-check
773 * the NAND page which we have just read. This is why we
774 * read the whole NAND page at jffs2_get_inode_nodes(),
775 * while we needed only the node header.
776 */
777 unsigned char *buf;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100778
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100779 /* 'buf' will point to the start of data */
780 buf = (unsigned char *)rd + sizeof(*rd);
781 /* len will be the read data length */
782 len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
Artem B. Bityutskiy280562b2005-08-17 15:57:43 +0100783 tn->partial_crc = crc32(0, buf, len);
784
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100785 dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100786
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100787 /* If we actually calculated the whole data CRC
788 * and it is wrong, drop the node. */
Artem B. Bityutskiy3c091332005-08-04 12:40:02 +0100789 if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
Artem B. Bityutskiy39243502005-08-03 10:26:50 +0100790 JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
791 ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100792 goto free_out;
Artem B. Bityutskiy39243502005-08-03 10:26:50 +0100793 }
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100794
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100795 } else if (csize == 0) {
796 /*
797 * We checked the header CRC. If the node has no data, adjust
798 * the space accounting now. For other nodes this will be done
799 * later either when the node is marked obsolete or when its
800 * data is checked.
801 */
802 struct jffs2_eraseblock *jeb;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100803
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100804 dbg_readinode("the node has no data.\n");
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100805 jeb = &c->blocks[ref->flash_offset / c->sector_size];
806 len = ref_totlen(c, jeb, ref);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100807
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100808 spin_lock(&c->erase_completion_lock);
809 jeb->used_size += len;
810 jeb->unchecked_size -= len;
811 c->used_size += len;
812 c->unchecked_size -= len;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100813 ref->flash_offset = ref_offset(ref) | REF_NORMAL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100814 spin_unlock(&c->erase_completion_lock);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100815 }
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100816 }
817
818 tn->fn = jffs2_alloc_full_dnode();
819 if (!tn->fn) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100820 JFFS2_ERROR("alloc fn failed\n");
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100821 ret = -ENOMEM;
822 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100823 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000824
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100825 tn->version = je32_to_cpu(rd->version);
826 tn->fn->ofs = je32_to_cpu(rd->offset);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100827 tn->data_crc = je32_to_cpu(rd->data_crc);
828 tn->csize = csize;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100829 tn->fn->raw = ref;
David Woodhousedf8e96f2007-04-25 03:23:42 +0100830 tn->overlapped = 0;
831
832 if (tn->version > rii->highest_version)
833 rii->highest_version = tn->version;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000834
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100835 /* There was a bug where we wrote hole nodes out with
836 csize/dsize swapped. Deal with it */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100837 if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
838 tn->fn->size = csize;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100839 else // normal case...
840 tn->fn->size = je32_to_cpu(rd->dsize);
841
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100842 dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
Artem B. Bityutskiy280562b2005-08-17 15:57:43 +0100843 ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000844
David Woodhousedf8e96f2007-04-25 03:23:42 +0100845 ret = jffs2_add_tn_to_tree(c, rii, tn);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100846
David Woodhousedf8e96f2007-04-25 03:23:42 +0100847 if (ret) {
848 jffs2_free_full_dnode(tn->fn);
849 free_out:
850 jffs2_free_tmp_dnode_info(tn);
851 return ret;
852 }
853#ifdef JFFS2_DBG_READINODE_MESSAGES
David Woodhouse1123e2a2007-05-05 16:29:34 +0100854 dbg_readinode("After adding ver %d:\n", je32_to_cpu(rd->version));
David Woodhousedf8e96f2007-04-25 03:23:42 +0100855 tn = tn_first(&rii->tn_root);
856 while (tn) {
857 dbg_readinode("%p: v %d r 0x%x-0x%x ov %d\n",
858 tn, tn->version, tn->fn->ofs,
859 tn->fn->ofs+tn->fn->size, tn->overlapped);
860 tn = tn_next(tn);
861 }
862#endif
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100863 return 0;
864}
865
866/*
867 * Helper function for jffs2_get_inode_nodes().
868 * It is called every time an unknown node is found.
869 *
 * Returns: 0 on success (nodes that must be dropped are marked
 * obsolete here rather than signalled back to the caller);
 * negative error code on failure.
873 */
static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
{
	/* We don't mark unknown nodes as REF_UNCHECKED */
	if (ref_flags(ref) == REF_UNCHECKED) {
		/* A node of unknown type cannot be CRC-checked, so it should
		   never have been accounted REF_UNCHECKED.  Complain loudly,
		   then obsolete it so it cannot confuse later passes. */
		JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n",
			    ref_offset(ref));
		JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n",
			    je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
			    je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc));
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	/* Set the ACCURATE bit so the compat-category switch below sees the
	   canonical form of the nodetype. */
	un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));

	switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {

	case JFFS2_FEATURE_INCOMPAT:
		/* An unknown INCOMPAT node means we cannot safely continue. */
		JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
			    je16_to_cpu(un->nodetype), ref_offset(ref));
		/* EEP */
		BUG();
		break;

	case JFFS2_FEATURE_ROCOMPAT:
		/* Unknown ROCOMPAT nodes are tolerable only when the
		   filesystem is mounted read-only. */
		JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
			    je16_to_cpu(un->nodetype), ref_offset(ref));
		BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
		break;

	case JFFS2_FEATURE_RWCOMPAT_COPY:
		/* Whinge but keep the node. */
		JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
			     je16_to_cpu(un->nodetype), ref_offset(ref));
		break;

	case JFFS2_FEATURE_RWCOMPAT_DELETE:
		/* Safe to delete an unknown node of this class: drop it now. */
		JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
			     je16_to_cpu(un->nodetype), ref_offset(ref));
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	return 0;
}
918
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100919/*
920 * Helper function for jffs2_get_inode_nodes().
921 * The function detects whether more data should be read and reads it if yes.
922 *
 * Returns: 0 on success;
924 * negative error code on failure.
925 */
926static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300927 int needed_len, int *rdlen, unsigned char *buf)
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100928{
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300929 int err, to_read = needed_len - *rdlen;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100930 size_t retlen;
931 uint32_t offs;
932
933 if (jffs2_is_writebuffered(c)) {
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300934 int rem = to_read % c->wbuf_pagesize;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100935
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300936 if (rem)
937 to_read += c->wbuf_pagesize - rem;
938 }
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100939
940 /* We need to read more data */
941 offs = ref_offset(ref) + *rdlen;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000942
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300943 dbg_readinode("read more %d bytes\n", to_read);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100944
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300945 err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100946 if (err) {
947 JFFS2_ERROR("can not read %d bytes from 0x%08x, "
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300948 "error code: %d.\n", to_read, offs, err);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100949 return err;
950 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000951
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300952 if (retlen < to_read) {
Randy Dunlapfb6a82c2006-04-11 20:12:10 -0400953 JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300954 offs, retlen, to_read);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100955 return -EIO;
956 }
957
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300958 *rdlen += to_read;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100959 return 0;
960}
961
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100962/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
David Woodhousedf8e96f2007-04-25 03:23:42 +0100963 with this ino. Perform a preliminary ordering on data nodes, throwing away
   those which are completely obsoleted by newer ones. The naïve approach we
   used to take — just returning them _all_ in version order — would cause us
   to run out of memory in certain degenerate cases. */
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct jffs2_readinode_info *rii)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	unsigned char *buf = NULL;
	union jffs2_node_union *node;
	size_t retlen;
	int len, err;

	rii->mctime_ver = 0;

	dbg_readinode("ino #%u\n", f->inocache->ino);

	/* FIXME: in case of NOR and available ->point() this
	 * needs to be fixed. */
	/* Buffer large enough for the biggest node header plus up to one
	   min. I/O unit of rounding (see the wbuf adjustment below). */
	len = sizeof(union jffs2_node_union) + c->wbuf_pagesize;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(&c->erase_completion_lock);
	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
	if (!valid_ref && f->inocache->ino != 1)
		JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
	while (valid_ref) {
		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/*
		 * At this point we don't know the type of the node we're going
		 * to read, so we do not know the size of its header. In order
		 * to minimize the amount of flash IO we assume the header is
		 * of size = JFFS2_MIN_NODE_HEADER.
		 */
		len = JFFS2_MIN_NODE_HEADER;
		if (jffs2_is_writebuffered(c)) {
			int end, rem;

			/*
			 * We are about to read JFFS2_MIN_NODE_HEADER bytes,
			 * but this flash has some minimal I/O unit. It is
			 * possible that we'll need to read more soon, so read
			 * up to the next min. I/O unit, in order not to
			 * re-read the same min. I/O unit twice.
			 */
			end = ref_offset(ref) + len;
			rem = end % c->wbuf_pagesize;
			if (rem)
				end += c->wbuf_pagesize - rem;
			len = end - ref_offset(ref);
		}

		dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)buf;

		/* No need to mask in the valid bit; it shouldn't be invalid */
		if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
			/* Bad header CRC: obsolete the node and move on. */
			JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
				     ref_offset(ref), je16_to_cpu(node->u.magic),
				     je16_to_cpu(node->u.nodetype),
				     je32_to_cpu(node->u.totlen),
				     je32_to_cpu(node->u.hdr_crc));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}
		if (je16_to_cpu(node->u.magic) != JFFS2_MAGIC_BITMASK) {
			/* Not a JFFS2 node, whinge and move on */
			JFFS2_NOTICE("Wrong magic bitmask 0x%04x in node header at %#08x.\n",
				     je16_to_cpu(node->u.magic), ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}

		/* Dispatch on node type; each path may need to pull in the
		   rest of that type's header via read_more(). */
		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			/* NOTE(review): this path passes 'retlen' while the
			   INODE path below passes 'len' — confirm whether the
			   asymmetry is intentional. */
			err = read_direntry(c, ref, &node->d, retlen, rii);
			if (unlikely(err))
				goto free_out;

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, len, rii);
			if (unlikely(err))
				goto free_out;

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

		}
	cont:
		/* Retake the lock before fetching the next ref (see the
		   comment at the top of the loop). */
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	kfree(buf);

	f->highest_version = rii->highest_version;

	dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
		      f->inocache->ino, rii->highest_version, rii->latest_mctime,
		      rii->mctime_ver);
	return 0;

	/* Error path: throw away everything collected so far. */
 free_out:
	jffs2_free_tmp_dnode_info_list(&rii->tn_root);
	jffs2_free_full_dirent_list(rii->fds);
	rii->fds = NULL;
	kfree(buf);
	return err;
}
1130
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001131static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132 struct jffs2_inode_info *f,
1133 struct jffs2_raw_inode *latest_node)
1134{
David Woodhousedf8e96f2007-04-25 03:23:42 +01001135 struct jffs2_readinode_info rii;
David Woodhouse61c4b232007-04-25 17:04:23 +01001136 uint32_t crc, new_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137 size_t retlen;
1138 int ret;
1139
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001140 dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141
David Woodhousedf8e96f2007-04-25 03:23:42 +01001142 memset(&rii, 0, sizeof(rii));
1143
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144 /* Grab all nodes relevant to this ino */
David Woodhousedf8e96f2007-04-25 03:23:42 +01001145 ret = jffs2_get_inode_nodes(c, f, &rii);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146
1147 if (ret) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001148 JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149 if (f->inocache->state == INO_STATE_READING)
1150 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1151 return ret;
1152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153
David Woodhousedf8e96f2007-04-25 03:23:42 +01001154 ret = jffs2_build_inode_fragtree(c, f, &rii);
1155 if (ret) {
1156 JFFS2_ERROR("Failed to build final fragtree for inode #%u: error %d\n",
1157 f->inocache->ino, ret);
1158 if (f->inocache->state == INO_STATE_READING)
1159 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1160 jffs2_free_tmp_dnode_info_list(&rii.tn_root);
1161 /* FIXME: We could at least crc-check them all */
1162 if (rii.mdata_tn) {
1163 jffs2_free_full_dnode(rii.mdata_tn->fn);
1164 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1165 rii.mdata_tn = NULL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001166 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001167 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001169
1170 if (rii.mdata_tn) {
1171 if (rii.mdata_tn->fn->raw == rii.latest_ref) {
1172 f->metadata = rii.mdata_tn->fn;
1173 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1174 } else {
1175 jffs2_kill_tn(c, rii.mdata_tn);
1176 }
1177 rii.mdata_tn = NULL;
1178 }
1179
1180 f->dents = rii.fds;
1181
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +01001182 jffs2_dbg_fragtree_paranoia_check_nolock(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183
David Woodhousedf8e96f2007-04-25 03:23:42 +01001184 if (unlikely(!rii.latest_ref)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185 /* No data nodes for this inode. */
1186 if (f->inocache->ino != 1) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001187 JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
David Woodhousedf8e96f2007-04-25 03:23:42 +01001188 if (!rii.fds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189 if (f->inocache->state == INO_STATE_READING)
1190 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1191 return -EIO;
1192 }
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001193 JFFS2_NOTICE("but it has children so we fake some modes for it\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194 }
1195 latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
1196 latest_node->version = cpu_to_je32(0);
1197 latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
1198 latest_node->isize = cpu_to_je32(0);
1199 latest_node->gid = cpu_to_je16(0);
1200 latest_node->uid = cpu_to_je16(0);
1201 if (f->inocache->state == INO_STATE_READING)
1202 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
1203 return 0;
1204 }
1205
David Woodhousedf8e96f2007-04-25 03:23:42 +01001206 ret = jffs2_flash_read(c, ref_offset(rii.latest_ref), sizeof(*latest_node), &retlen, (void *)latest_node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 if (ret || retlen != sizeof(*latest_node)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001208 JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
1209 ret, retlen, sizeof(*latest_node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 /* FIXME: If this fails, there seems to be a memory leak. Find it. */
1211 up(&f->sem);
1212 jffs2_do_clear_inode(c, f);
1213 return ret?ret:-EIO;
1214 }
1215
1216 crc = crc32(0, latest_node, sizeof(*latest_node)-8);
1217 if (crc != je32_to_cpu(latest_node->node_crc)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001218 JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
David Woodhousedf8e96f2007-04-25 03:23:42 +01001219 f->inocache->ino, ref_offset(rii.latest_ref));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 up(&f->sem);
1221 jffs2_do_clear_inode(c, f);
1222 return -EIO;
1223 }
1224
1225 switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
1226 case S_IFDIR:
David Woodhousedf8e96f2007-04-25 03:23:42 +01001227 if (rii.mctime_ver > je32_to_cpu(latest_node->version)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228 /* The times in the latest_node are actually older than
1229 mctime in the latest dirent. Cheat. */
David Woodhousedf8e96f2007-04-25 03:23:42 +01001230 latest_node->ctime = latest_node->mtime = cpu_to_je32(rii.latest_mctime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231 }
1232 break;
1233
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001234
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 case S_IFREG:
1236 /* If it was a regular file, truncate it to the latest node's isize */
David Woodhouse61c4b232007-04-25 17:04:23 +01001237 new_size = jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
1238 if (new_size != je32_to_cpu(latest_node->isize)) {
1239 JFFS2_WARNING("Truncating ino #%u to %d bytes failed because it only had %d bytes to start with!\n",
1240 f->inocache->ino, je32_to_cpu(latest_node->isize), new_size);
1241 latest_node->isize = cpu_to_je32(new_size);
1242 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 break;
1244
1245 case S_IFLNK:
1246 /* Hack to work around broken isize in old symlink code.
1247 Remove this when dwmw2 comes to his senses and stops
1248 symlinks from being an entirely gratuitous special
1249 case. */
1250 if (!je32_to_cpu(latest_node->isize))
1251 latest_node->isize = latest_node->dsize;
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001252
1253 if (f->inocache->state != INO_STATE_CHECKING) {
1254 /* Symlink's inode data is the target path. Read it and
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001255 * keep in RAM to facilitate quick follow symlink
1256 * operation. */
1257 f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
1258 if (!f->target) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001259 JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001260 up(&f->sem);
1261 jffs2_do_clear_inode(c, f);
1262 return -ENOMEM;
1263 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001264
David Woodhousedf8e96f2007-04-25 03:23:42 +01001265 ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node),
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001266 je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001267
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001268 if (ret || retlen != je32_to_cpu(latest_node->csize)) {
1269 if (retlen != je32_to_cpu(latest_node->csize))
1270 ret = -EIO;
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001271 kfree(f->target);
1272 f->target = NULL;
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001273 up(&f->sem);
1274 jffs2_do_clear_inode(c, f);
1275 return -ret;
1276 }
1277
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001278 f->target[je32_to_cpu(latest_node->csize)] = '\0';
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001279 dbg_readinode("symlink's target '%s' cached\n", f->target);
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001280 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001281
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 /* fall through... */
1283
1284 case S_IFBLK:
1285 case S_IFCHR:
1286 /* Certain inode types should have only one data node, and it's
1287 kept as the metadata node */
1288 if (f->metadata) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001289 JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 f->inocache->ino, jemode_to_cpu(latest_node->mode));
1291 up(&f->sem);
1292 jffs2_do_clear_inode(c, f);
1293 return -EIO;
1294 }
1295 if (!frag_first(&f->fragtree)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001296 JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297 f->inocache->ino, jemode_to_cpu(latest_node->mode));
1298 up(&f->sem);
1299 jffs2_do_clear_inode(c, f);
1300 return -EIO;
1301 }
1302 /* ASSERT: f->fraglist != NULL */
1303 if (frag_next(frag_first(&f->fragtree))) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001304 JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 f->inocache->ino, jemode_to_cpu(latest_node->mode));
1306 /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
1307 up(&f->sem);
1308 jffs2_do_clear_inode(c, f);
1309 return -EIO;
1310 }
1311 /* OK. We're happy */
1312 f->metadata = frag_first(&f->fragtree)->node;
1313 jffs2_free_node_frag(frag_first(&f->fragtree));
1314 f->fragtree = RB_ROOT;
1315 break;
1316 }
1317 if (f->inocache->state == INO_STATE_READING)
1318 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
1319
1320 return 0;
1321}
1322
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001323/* Scan the list of all nodes present for this ino, build map of versions, etc. */
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001324int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001325 uint32_t ino, struct jffs2_raw_inode *latest_node)
1326{
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001327 dbg_readinode("read inode #%u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001328
1329 retry_inocache:
1330 spin_lock(&c->inocache_lock);
1331 f->inocache = jffs2_get_ino_cache(c, ino);
1332
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001333 if (f->inocache) {
1334 /* Check its state. We may need to wait before we can use it */
1335 switch(f->inocache->state) {
1336 case INO_STATE_UNCHECKED:
1337 case INO_STATE_CHECKEDABSENT:
1338 f->inocache->state = INO_STATE_READING;
1339 break;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001340
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001341 case INO_STATE_CHECKING:
1342 case INO_STATE_GC:
1343 /* If it's in either of these states, we need
1344 to wait for whoever's got it to finish and
1345 put it back. */
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001346 dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001347 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
1348 goto retry_inocache;
1349
1350 case INO_STATE_READING:
1351 case INO_STATE_PRESENT:
1352 /* Eep. This should never happen. It can
1353 happen if Linux calls read_inode() again
1354 before clear_inode() has finished though. */
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001355 JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001356 /* Fail. That's probably better than allowing it to succeed */
1357 f->inocache = NULL;
1358 break;
1359
1360 default:
1361 BUG();
1362 }
1363 }
1364 spin_unlock(&c->inocache_lock);
1365
1366 if (!f->inocache && ino == 1) {
1367 /* Special case - no root inode on medium */
1368 f->inocache = jffs2_alloc_inode_cache();
1369 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001370 JFFS2_ERROR("cannot allocate inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001371 return -ENOMEM;
1372 }
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001373 dbg_readinode("creating inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001374 memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
1375 f->inocache->ino = f->inocache->nlink = 1;
1376 f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
1377 f->inocache->state = INO_STATE_READING;
1378 jffs2_add_ino_cache(c, f->inocache);
1379 }
1380 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001381 JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001382 return -ENOENT;
1383 }
1384
1385 return jffs2_do_read_inode_internal(c, f, latest_node);
1386}
1387
1388int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
1389{
1390 struct jffs2_raw_inode n;
Yan Burman3d375d92006-12-04 15:03:01 -08001391 struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001392 int ret;
1393
1394 if (!f)
1395 return -ENOMEM;
1396
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001397 init_MUTEX_LOCKED(&f->sem);
1398 f->inocache = ic;
1399
1400 ret = jffs2_do_read_inode_internal(c, f, &n);
1401 if (!ret) {
1402 up(&f->sem);
1403 jffs2_do_clear_inode(c, f);
1404 }
1405 kfree (f);
1406 return ret;
1407}
1408
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
1410{
1411 struct jffs2_full_dirent *fd, *fds;
1412 int deleted;
1413
KaiGai Koheic7afb0f2006-07-02 15:13:46 +01001414 jffs2_clear_acl(f);
KaiGai Kohei355ed4e2006-06-24 09:15:36 +09001415 jffs2_xattr_delete_inode(c, f->inocache);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 down(&f->sem);
1417 deleted = f->inocache && !f->inocache->nlink;
1418
David Woodhouse67e345d2005-02-27 23:01:36 +00001419 if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
1420 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);
1421
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 if (f->metadata) {
1423 if (deleted)
1424 jffs2_mark_node_obsolete(c, f->metadata->raw);
1425 jffs2_free_full_dnode(f->metadata);
1426 }
1427
1428 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
1429
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001430 if (f->target) {
1431 kfree(f->target);
1432 f->target = NULL;
1433 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001434
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001435 fds = f->dents;
1436 while(fds) {
1437 fd = fds;
1438 fds = fd->next;
1439 jffs2_free_full_dirent(fd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 }
1441
David Woodhouse67e345d2005-02-27 23:01:36 +00001442 if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
David Woodhouse67e345d2005-02-27 23:01:36 +00001444 if (f->inocache->nodes == (void *)f->inocache)
1445 jffs2_del_ino_cache(c, f->inocache);
1446 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447
1448 up(&f->sem);
1449}