blob: 49d4b0a67c55b4ca8c86faf558ef75a5e6abfad9 [file] [log] [blame]
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: readinode.c,v 1.143 2005/11/07 11:14:41 gleixner Exp $
 *
 */
13
14#include <linux/kernel.h>
Andrew Lunn737b7662005-07-30 16:29:30 +010015#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/slab.h>
17#include <linux/fs.h>
18#include <linux/crc32.h>
19#include <linux/pagemap.h>
20#include <linux/mtd/mtd.h>
21#include <linux/compiler.h>
22#include "nodelist.h"
23
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010024/*
David Woodhousedf8e96f2007-04-25 03:23:42 +010025 * Check the data CRC of the node.
26 *
27 * Returns: 0 if the data CRC is correct;
28 * 1 - if incorrect;
29 * error code if an error occured.
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010030 */
David Woodhousedf8e96f2007-04-25 03:23:42 +010031static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
Linus Torvalds1da177e2005-04-16 15:20:36 -070032{
David Woodhousedf8e96f2007-04-25 03:23:42 +010033 struct jffs2_raw_node_ref *ref = tn->fn->raw;
34 int err = 0, pointed = 0;
35 struct jffs2_eraseblock *jeb;
36 unsigned char *buffer;
37 uint32_t crc, ofs, len;
38 size_t retlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
David Woodhousedf8e96f2007-04-25 03:23:42 +010040 BUG_ON(tn->csize == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
David Woodhousedf8e96f2007-04-25 03:23:42 +010042 if (!jffs2_is_writebuffered(c))
43 goto adj_acc;
44
45 /* Calculate how many bytes were already checked */
46 ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
47 len = ofs % c->wbuf_pagesize;
48 if (likely(len))
49 len = c->wbuf_pagesize - len;
50
51 if (len >= tn->csize) {
52 dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
53 ref_offset(ref), tn->csize, ofs);
54 goto adj_acc;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010055 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
David Woodhousedf8e96f2007-04-25 03:23:42 +010057 ofs += len;
58 len = tn->csize - len;
59
60 dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
61 ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);
62
63#ifndef __ECOS
64 /* TODO: instead, incapsulate point() stuff to jffs2_flash_read(),
65 * adding and jffs2_flash_read_end() interface. */
66 if (c->mtd->point) {
67 err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
68 if (!err && retlen < tn->csize) {
69 JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
70 c->mtd->unpoint(c->mtd, buffer, ofs, len);
71 } else if (err)
72 JFFS2_WARNING("MTD point failed: error code %d.\n", err);
73 else
74 pointed = 1; /* succefully pointed to device */
75 }
76#endif
77
78 if (!pointed) {
79 buffer = kmalloc(len, GFP_KERNEL);
80 if (unlikely(!buffer))
81 return -ENOMEM;
82
83 /* TODO: this is very frequent pattern, make it a separate
84 * routine */
85 err = jffs2_flash_read(c, ofs, len, &retlen, buffer);
86 if (err) {
87 JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
88 goto free_out;
89 }
90
91 if (retlen != len) {
92 JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
93 err = -EIO;
94 goto free_out;
95 }
96 }
97
98 /* Continue calculating CRC */
99 crc = crc32(tn->partial_crc, buffer, len);
100 if(!pointed)
101 kfree(buffer);
102#ifndef __ECOS
103 else
104 c->mtd->unpoint(c->mtd, buffer, ofs, len);
105#endif
106
107 if (crc != tn->data_crc) {
108 JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
109 ofs, tn->data_crc, crc);
110 return 1;
111 }
112
113adj_acc:
114 jeb = &c->blocks[ref->flash_offset / c->sector_size];
115 len = ref_totlen(c, jeb, ref);
116 /* If it should be REF_NORMAL, it'll get marked as such when
117 we build the fragtree, shortly. No need to worry about GC
118 moving it while it's marked REF_PRISTINE -- GC won't happen
119 till we've finished checking every inode anyway. */
120 ref->flash_offset |= REF_PRISTINE;
121 /*
122 * Mark the node as having been checked and fix the
123 * accounting accordingly.
124 */
125 spin_lock(&c->erase_completion_lock);
126 jeb->used_size += len;
127 jeb->unchecked_size -= len;
128 c->used_size += len;
129 c->unchecked_size -= len;
130 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
131 spin_unlock(&c->erase_completion_lock);
132
133 return 0;
134
135free_out:
136 if(!pointed)
137 kfree(buffer);
138#ifndef __ECOS
139 else
140 c->mtd->unpoint(c->mtd, buffer, ofs, len);
141#endif
142 return err;
143}
144
145/*
146 * Helper function for jffs2_add_older_frag_to_fragtree().
147 *
148 * Checks the node if we are in the checking stage.
149 */
150static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
151{
152 int ret;
153
154 BUG_ON(ref_obsolete(tn->fn->raw));
155
156 /* We only check the data CRC of unchecked nodes */
157 if (ref_flags(tn->fn->raw) != REF_UNCHECKED)
158 return 0;
159
160 dbg_readinode("check node %#04x-%#04x, phys offs %#08x\n",
161 tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw));
162
163 ret = check_node_data(c, tn);
164 if (unlikely(ret < 0)) {
165 JFFS2_ERROR("check_node_data() returned error: %d.\n",
166 ret);
167 } else if (unlikely(ret > 0)) {
168 dbg_readinode("CRC error, mark it obsolete.\n");
169 jffs2_mark_node_obsolete(c, tn->fn->raw);
170 }
171
172 return ret;
173}
174
175static struct jffs2_tmp_dnode_info *jffs2_lookup_tn(struct rb_root *tn_root, uint32_t offset)
176{
177 struct rb_node *next;
178 struct jffs2_tmp_dnode_info *tn = NULL;
179
180 dbg_readinode("root %p, offset %d\n", tn_root, offset);
181
182 next = tn_root->rb_node;
183
184 while (next) {
185 tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb);
186
187 if (tn->fn->ofs < offset)
188 next = tn->rb.rb_right;
189 else if (tn->fn->ofs >= offset)
190 next = tn->rb.rb_left;
191 else
192 break;
193 }
194
195 return tn;
196}
197
198
199static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
200{
201 jffs2_mark_node_obsolete(c, tn->fn->raw);
202 jffs2_free_full_dnode(tn->fn);
203 jffs2_free_tmp_dnode_info(tn);
204}
205/*
206 * This function is used when we read an inode. Data nodes arrive in
207 * arbitrary order -- they may be older or newer than the nodes which
208 * are already in the tree. Where overlaps occur, the older node can
209 * be discarded as long as the newer passes the CRC check. We don't
210 * bother to keep track of holes in this rbtree, and neither do we deal
211 * with frags -- we can have multiple entries starting at the same
212 * offset, and the one with the smallest length will come first in the
213 * ordering.
214 *
215 * Returns 0 if the node was inserted
216 * 1 if the node is obsolete (because we can't mark it so yet)
217 * < 0 an if error occurred
218 */
219static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
220 struct jffs2_readinode_info *rii,
221 struct jffs2_tmp_dnode_info *tn)
222{
223 uint32_t fn_end = tn->fn->ofs + tn->fn->size;
224 struct jffs2_tmp_dnode_info *insert_point = NULL, *this;
225
226 dbg_readinode("insert fragment %#04x-%#04x, ver %u\n", tn->fn->ofs, fn_end, tn->version);
227
228 /* If a node has zero dsize, we only have to keep if it if it might be the
229 node with highest version -- i.e. the one which will end up as f->metadata.
230 Note that such nodes won't be REF_UNCHECKED since there are no data to
231 check anyway. */
232 if (!tn->fn->size) {
233 if (rii->mdata_tn) {
234 /* We had a candidate mdata node already */
235 dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version);
236 jffs2_kill_tn(c, rii->mdata_tn);
237 }
238 rii->mdata_tn = tn;
239 dbg_readinode("keep new mdata with ver %d\n", tn->version);
240 return 0;
241 }
242
243 /* Find the earliest node which _may_ be relevant to this one */
244 this = jffs2_lookup_tn(&rii->tn_root, tn->fn->ofs);
245 if (!this) {
246 /* First addition to empty tree. $DEITY how I love the easy cases */
247 rb_link_node(&tn->rb, NULL, &rii->tn_root.rb_node);
248 rb_insert_color(&tn->rb, &rii->tn_root);
249 dbg_readinode("keep new frag\n");
250 return 0;
251 }
252
253 /* If we add a new node it'll be somewhere under here. */
254 insert_point = this;
255
256 /* If the node is coincident with another at a lower address,
257 back up until the other node is found. It may be relevant */
258 while (tn->overlapped)
259 tn = tn_prev(tn);
260
261 dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");
262
263 while (this) {
264 if (this->fn->ofs > fn_end)
265 break;
266 dbg_readinode("Ponder this ver %d, 0x%x-0x%x\n",
267 this->version, this->fn->ofs, this->fn->size);
268
269 if (this->version == tn->version) {
270 /* Version number collision means REF_PRISTINE GC. Accept either of them
271 as long as the CRC is correct. Check the one we have already... */
272 if (!check_tn_node(c, this)) {
273 /* The one we already had was OK. Keep it and throw away the new one */
274 dbg_readinode("Like old node. Throw away new\n");
275 jffs2_kill_tn(c, tn);
276 return 0;
277 } else {
278 /* Who cares if the new one is good; keep it for now anyway. */
279 rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
280 /* Same overlapping from in front and behind */
281 tn->overlapped = this->overlapped;
282 jffs2_kill_tn(c, this);
283 dbg_readinode("Like new node. Throw away old\n");
284 return 0;
285 }
286 }
287 if (this->version < tn->version &&
288 this->fn->ofs >= tn->fn->ofs &&
289 this->fn->ofs + this->fn->size <= fn_end) {
290 /* New node entirely overlaps 'this' */
291 if (check_tn_node(c, tn)) {
292 dbg_readinode("new node bad CRC\n");
293 jffs2_kill_tn(c, tn);
294 return 0;
295 }
296 /* ... and is good. Kill 'this'... */
297 rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
298 tn->overlapped = this->overlapped;
299 jffs2_kill_tn(c, this);
300 /* ... and any subsequent nodes which are also overlapped */
301 this = tn_next(tn);
302 while (this && this->fn->ofs + this->fn->size < fn_end) {
303 struct jffs2_tmp_dnode_info *next = tn_next(this);
304 if (this->version < tn->version) {
305 tn_erase(this, &rii->tn_root);
306 dbg_readinode("Kill overlapped ver %d, 0x%x-0x%x\n",
307 this->version, this->fn->ofs,
308 this->fn->ofs+this->fn->size);
309 jffs2_kill_tn(c, this);
310 }
311 this = next;
312 }
313 dbg_readinode("Done inserting new\n");
314 return 0;
315 }
316 if (this->version > tn->version &&
317 this->fn->ofs <= tn->fn->ofs &&
318 this->fn->ofs+this->fn->size >= fn_end) {
319 /* New node entirely overlapped by 'this' */
320 if (!check_tn_node(c, this)) {
321 dbg_readinode("Good CRC on old node. Kill new\n");
322 jffs2_kill_tn(c, tn);
323 return 0;
324 }
325 /* ... but 'this' was bad. Replace it... */
326 rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
327 dbg_readinode("Bad CRC on old overlapping node. Kill it\n");
328 jffs2_kill_tn(c, this);
329 return 0;
330 }
331 /* We want to be inserted under the last node which is
332 either at a lower offset _or_ has a smaller range */
333 if (this->fn->ofs < tn->fn->ofs ||
334 (this->fn->ofs == tn->fn->ofs &&
335 this->fn->size <= tn->fn->size))
336 insert_point = this;
337
338 this = tn_next(this);
339 }
340 dbg_readinode("insert_point %p, ver %d, 0x%x-0x%x, ov %d\n",
341 insert_point, insert_point->version, insert_point->fn->ofs,
342 insert_point->fn->ofs+insert_point->fn->size,
343 insert_point->overlapped);
344 /* We neither completely obsoleted nor were completely
345 obsoleted by an earlier node. Insert under insert_point */
346 {
347 struct rb_node *parent = &insert_point->rb;
348 struct rb_node **link = &parent;
349
350 while (*link) {
351 parent = *link;
352 insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
353 if (tn->fn->ofs > insert_point->fn->ofs)
354 link = &insert_point->rb.rb_right;
355 else if (tn->fn->ofs < insert_point->fn->ofs ||
356 tn->fn->size < insert_point->fn->size)
357 link = &insert_point->rb.rb_left;
358 else
359 link = &insert_point->rb.rb_right;
360 }
361 rb_link_node(&tn->rb, &insert_point->rb, link);
362 rb_insert_color(&tn->rb, &rii->tn_root);
363 }
364 /* If there's anything behind that overlaps us, note it */
365 this = tn_prev(tn);
366 if (this) {
367 while (1) {
368 if (this->fn->ofs + this->fn->size > tn->fn->ofs) {
369 dbg_readinode("Node is overlapped by %p (v %d, 0x%x-0x%x)\n",
370 this, this->version, this->fn->ofs,
371 this->fn->ofs+this->fn->size);
372 tn->overlapped = 1;
373 break;
374 }
375 if (!this->overlapped)
376 break;
377 this = tn_prev(this);
378 }
379 }
380
381 /* If the new node overlaps anything ahead, note it */
382 this = tn_next(tn);
383 while (this && this->fn->ofs < fn_end) {
384 this->overlapped = 1;
385 dbg_readinode("Node ver %d, 0x%x-0x%x is overlapped\n",
386 this->version, this->fn->ofs,
387 this->fn->ofs+this->fn->size);
388 this = tn_next(this);
389 }
390 return 0;
391}
392
393/* Trivial function to remove the last node in the tree. Which by definition
394 has no right-hand -- so can be removed just by making its only child (if
395 any) take its place under its parent. */
396static void eat_last(struct rb_root *root, struct rb_node *node)
397{
398 struct rb_node *parent = rb_parent(node);
399 struct rb_node **link;
400
401 /* LAST! */
402 BUG_ON(node->rb_right);
403
404 if (!parent)
405 link = &root->rb_node;
406 else if (node == parent->rb_left)
407 link = &parent->rb_left;
408 else
409 link = &parent->rb_right;
410
411 *link = node->rb_left;
412 /* Colour doesn't matter now. Only the parent pointer. */
413 if (node->rb_left)
414 node->rb_left->rb_parent_color = node->rb_parent_color;
415}
416
417/* We put this in reverse order, so we can just use eat_last */
418static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn)
419{
420 struct rb_node **link = &ver_root->rb_node;
421 struct rb_node *parent = NULL;
422 struct jffs2_tmp_dnode_info *this_tn;
423
424 while (*link) {
425 parent = *link;
426 this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
427
428 if (tn->version > this_tn->version)
429 link = &parent->rb_left;
430 else
431 link = &parent->rb_right;
432 }
433 dbg_readinode("Link new node at %p (root is %p)\n", link, ver_root);
434 rb_link_node(&tn->rb, parent, link);
435 rb_insert_color(&tn->rb, ver_root);
436}
437
438/* Build final, normal fragtree from tn tree. It doesn't matter which order
439 we add nodes to the real fragtree, as long as they don't overlap. And
440 having thrown away the majority of overlapped nodes as we went, there
441 really shouldn't be many sets of nodes which do overlap. If we start at
442 the end, we can use the overlap markers -- we can just eat nodes which
443 aren't overlapped, and when we encounter nodes which _do_ overlap we
444 sort them all into a temporary tree in version order before replaying them. */
445static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
446 struct jffs2_inode_info *f,
447 struct jffs2_readinode_info *rii)
448{
449 struct jffs2_tmp_dnode_info *pen, *last, *this;
450 struct rb_root ver_root = RB_ROOT;
451 uint32_t high_ver = 0;
452
453 if (rii->mdata_tn) {
454 dbg_readinode("potential mdata is ver %d at %p\n", rii->mdata_tn->version, rii->mdata_tn);
455 high_ver = rii->mdata_tn->version;
456 rii->latest_ref = rii->mdata_tn->fn->raw;
457 }
458#ifdef JFFS2_DBG_READINODE_MESSAGES
459 this = tn_last(&rii->tn_root);
460 while (this) {
461 dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs,
462 this->fn->ofs+this->fn->size, this->overlapped);
463 this = tn_prev(this);
464 }
465#endif
466 pen = tn_last(&rii->tn_root);
467 while ((last = pen)) {
468 pen = tn_prev(last);
469
470 eat_last(&rii->tn_root, &last->rb);
471 ver_insert(&ver_root, last);
472
473 if (unlikely(last->overlapped))
474 continue;
475
476 /* Now we have a bunch of nodes in reverse version
477 order, in the tree at ver_root. Most of the time,
478 there'll actually be only one node in the 'tree',
479 in fact. */
480 this = tn_last(&ver_root);
481
482 while (this) {
483 struct jffs2_tmp_dnode_info *vers_next;
484 int ret;
485 vers_next = tn_prev(this);
486 eat_last(&ver_root, &this->rb);
487 if (check_tn_node(c, this)) {
488 dbg_readinode("node ver %x, 0x%x-0x%x failed CRC\n",
489 this->version, this->fn->ofs,
490 this->fn->ofs+this->fn->size);
491 jffs2_kill_tn(c, this);
492 } else {
493 if (this->version > high_ver) {
494 /* Note that this is different from the other
495 highest_version, because this one is only
496 counting _valid_ nodes which could give the
497 latest inode metadata */
498 high_ver = this->version;
499 rii->latest_ref = this->fn->raw;
500 }
501 dbg_readinode("Add %p (v %x, 0x%x-0x%x, ov %d) to fragtree\n",
502 this, this->version, this->fn->ofs,
503 this->fn->ofs+this->fn->size, this->overlapped);
504
505 ret = jffs2_add_full_dnode_to_inode(c, f, this->fn);
506 if (ret) {
507 /* Free the nodes in vers_root; let the caller
508 deal with the rest */
509 JFFS2_ERROR("Add node to tree failed %d\n", ret);
510 while (1) {
511 vers_next = tn_prev(this);
512 if (check_tn_node(c, this))
513 jffs2_mark_node_obsolete(c, this->fn->raw);
514 jffs2_free_full_dnode(this->fn);
515 jffs2_free_tmp_dnode_info(this);
516 this = vers_next;
517 if (!this)
518 break;
519 eat_last(&ver_root, &vers_next->rb);
520 }
521 return ret;
522 }
523 jffs2_free_tmp_dnode_info(this);
524 }
525 this = vers_next;
526 }
527 }
528 return 0;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100529}
530
531static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
532{
533 struct rb_node *this;
534 struct jffs2_tmp_dnode_info *tn;
535
536 this = list->rb_node;
537
538 /* Now at bottom of tree */
539 while (this) {
540 if (this->rb_left)
541 this = this->rb_left;
542 else if (this->rb_right)
543 this = this->rb_right;
544 else {
545 tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
546 jffs2_free_full_dnode(tn->fn);
547 jffs2_free_tmp_dnode_info(tn);
548
David Woodhouse21f1d5f2006-04-21 13:17:57 +0100549 this = rb_parent(this);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100550 if (!this)
551 break;
552
553 if (this->rb_left == &tn->rb)
554 this->rb_left = NULL;
555 else if (this->rb_right == &tn->rb)
556 this->rb_right = NULL;
557 else BUG();
558 }
559 }
560 list->rb_node = NULL;
561}
562
563static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
564{
565 struct jffs2_full_dirent *next;
566
567 while (fd) {
568 next = fd->next;
569 jffs2_free_full_dirent(fd);
570 fd = next;
571 }
572}
573
574/* Returns first valid node after 'ref'. May return 'ref' */
575static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
576{
577 while (ref && ref->next_in_ino) {
578 if (!ref_obsolete(ref))
579 return ref;
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100580 dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100581 ref = ref->next_in_ino;
582 }
583 return NULL;
584}
585
586/*
587 * Helper function for jffs2_get_inode_nodes().
588 * It is called every time an directory entry node is found.
589 *
590 * Returns: 0 on succes;
591 * 1 if the node should be marked obsolete;
592 * negative error code on failure.
593 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100594static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
David Woodhousedf8e96f2007-04-25 03:23:42 +0100595 struct jffs2_raw_dirent *rd, size_t read,
596 struct jffs2_readinode_info *rii)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100597{
598 struct jffs2_full_dirent *fd;
David Woodhouse1046d882006-06-18 22:44:21 +0100599 uint32_t crc;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000600
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100601 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
602 BUG_ON(ref_obsolete(ref));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000603
David Woodhouse1046d882006-06-18 22:44:21 +0100604 crc = crc32(0, rd, sizeof(*rd) - 8);
605 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
606 JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
607 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
David Woodhousedf8e96f2007-04-25 03:23:42 +0100608 jffs2_mark_node_obsolete(c, ref);
609 return 0;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100610 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000611
David Woodhouse1046d882006-06-18 22:44:21 +0100612 /* If we've never checked the CRCs on this node, check them now */
613 if (ref_flags(ref) == REF_UNCHECKED) {
614 struct jffs2_eraseblock *jeb;
615 int len;
616
617 /* Sanity check */
618 if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
619 JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
620 ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
David Woodhousedf8e96f2007-04-25 03:23:42 +0100621 jffs2_mark_node_obsolete(c, ref);
622 return 0;
David Woodhouse1046d882006-06-18 22:44:21 +0100623 }
624
625 jeb = &c->blocks[ref->flash_offset / c->sector_size];
626 len = ref_totlen(c, jeb, ref);
627
628 spin_lock(&c->erase_completion_lock);
629 jeb->used_size += len;
630 jeb->unchecked_size -= len;
631 c->used_size += len;
632 c->unchecked_size -= len;
633 ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
634 spin_unlock(&c->erase_completion_lock);
635 }
636
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100637 fd = jffs2_alloc_full_dirent(rd->nsize + 1);
638 if (unlikely(!fd))
639 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700640
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100641 fd->raw = ref;
642 fd->version = je32_to_cpu(rd->version);
643 fd->ino = je32_to_cpu(rd->ino);
644 fd->type = rd->type;
645
David Woodhousedf8e96f2007-04-25 03:23:42 +0100646 if (fd->version > rii->highest_version)
647 rii->highest_version = fd->version;
648
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100649 /* Pick out the mctime of the latest dirent */
David Woodhousedf8e96f2007-04-25 03:23:42 +0100650 if(fd->version > rii->mctime_ver && je32_to_cpu(rd->mctime)) {
651 rii->mctime_ver = fd->version;
652 rii->latest_mctime = je32_to_cpu(rd->mctime);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100653 }
654
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000655 /*
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100656 * Copy as much of the name as possible from the raw
657 * dirent we've already read from the flash.
658 */
659 if (read > sizeof(*rd))
660 memcpy(&fd->name[0], &rd->name[0],
661 min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000662
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100663 /* Do we need to copy any more of the name directly from the flash? */
664 if (rd->nsize + sizeof(*rd) > read) {
665 /* FIXME: point() */
666 int err;
667 int already = read - sizeof(*rd);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000668
669 err = jffs2_flash_read(c, (ref_offset(ref)) + read,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100670 rd->nsize - already, &read, &fd->name[already]);
671 if (unlikely(read != rd->nsize - already) && likely(!err))
672 return -EIO;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000673
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100674 if (unlikely(err)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100675 JFFS2_ERROR("read remainder of name: error %d\n", err);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100676 jffs2_free_full_dirent(fd);
677 return -EIO;
678 }
679 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000680
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100681 fd->nhash = full_name_hash(fd->name, rd->nsize);
682 fd->next = NULL;
683 fd->name[rd->nsize] = '\0';
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000684
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100685 /*
686 * Wheee. We now have a complete jffs2_full_dirent structure, with
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000687 * the name in it and everything. Link it into the list
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100688 */
David Woodhousedf8e96f2007-04-25 03:23:42 +0100689 jffs2_add_fd_to_list(c, fd, &rii->fds);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100690
691 return 0;
692}
693
694/*
695 * Helper function for jffs2_get_inode_nodes().
696 * It is called every time an inode node is found.
697 *
David Woodhousedf8e96f2007-04-25 03:23:42 +0100698 * Returns: 0 on success;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100699 * 1 if the node should be marked obsolete;
700 * negative error code on failure.
701 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100702static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
David Woodhousedf8e96f2007-04-25 03:23:42 +0100703 struct jffs2_raw_inode *rd, int rdlen,
704 struct jffs2_readinode_info *rii)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100705{
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100706 struct jffs2_tmp_dnode_info *tn;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100707 uint32_t len, csize;
708 int ret = 1;
David Woodhouse1046d882006-06-18 22:44:21 +0100709 uint32_t crc;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000710
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100711 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
712 BUG_ON(ref_obsolete(ref));
713
David Woodhouse1046d882006-06-18 22:44:21 +0100714 crc = crc32(0, rd, sizeof(*rd) - 8);
715 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
716 JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
717 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
David Woodhousedf8e96f2007-04-25 03:23:42 +0100718 jffs2_mark_node_obsolete(c, ref);
719 return 0;
David Woodhouse1046d882006-06-18 22:44:21 +0100720 }
721
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100722 tn = jffs2_alloc_tmp_dnode_info();
723 if (!tn) {
Randy Dunlapfb6a82c2006-04-11 20:12:10 -0400724 JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100725 return -ENOMEM;
726 }
727
728 tn->partial_crc = 0;
729 csize = je32_to_cpu(rd->csize);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000730
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100731 /* If we've never checked the CRCs on this node, check them now */
732 if (ref_flags(ref) == REF_UNCHECKED) {
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000733
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100734 /* Sanity checks */
735 if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
736 unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100737 JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
Andrew Lunn737b7662005-07-30 16:29:30 +0100738 jffs2_dbg_dump_node(c, ref_offset(ref));
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100739 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100740 }
741
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100742 if (jffs2_is_writebuffered(c) && csize != 0) {
743 /* At this point we are supposed to check the data CRC
744 * of our unchecked node. But thus far, we do not
745 * know whether the node is valid or obsolete. To
746 * figure this out, we need to walk all the nodes of
747 * the inode and build the inode fragtree. We don't
748 * want to spend time checking data of nodes which may
749 * later be found to be obsolete. So we put off the full
750 * data CRC checking until we have read all the inode
751 * nodes and have started building the fragtree.
752 *
753 * The fragtree is being built starting with nodes
754 * having the highest version number, so we'll be able
755 * to detect whether a node is valid (i.e., it is not
756 * overlapped by a node with higher version) or not.
757 * And we'll be able to check only those nodes, which
758 * are not obsolete.
759 *
760 * Of course, this optimization only makes sense in case
761 * of NAND flashes (or other flashes whith
762 * !jffs2_can_mark_obsolete()), since on NOR flashes
763 * nodes are marked obsolete physically.
764 *
765 * Since NAND flashes (or other flashes with
766 * jffs2_is_writebuffered(c)) are anyway read by
767 * fractions of c->wbuf_pagesize, and we have just read
768 * the node header, it is likely that the starting part
769 * of the node data is also read when we read the
770 * header. So we don't mind to check the CRC of the
771 * starting part of the data of the node now, and check
772 * the second part later (in jffs2_check_node_data()).
773 * Of course, we will not need to re-read and re-check
774 * the NAND page which we have just read. This is why we
775 * read the whole NAND page at jffs2_get_inode_nodes(),
776 * while we needed only the node header.
777 */
778 unsigned char *buf;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100779
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100780 /* 'buf' will point to the start of data */
781 buf = (unsigned char *)rd + sizeof(*rd);
782 /* len will be the read data length */
783 len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
Artem B. Bityutskiy280562b2005-08-17 15:57:43 +0100784 tn->partial_crc = crc32(0, buf, len);
785
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100786 dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100787
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100788 /* If we actually calculated the whole data CRC
789 * and it is wrong, drop the node. */
Artem B. Bityutskiy3c091332005-08-04 12:40:02 +0100790 if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
Artem B. Bityutskiy39243502005-08-03 10:26:50 +0100791 JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
792 ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100793 goto free_out;
Artem B. Bityutskiy39243502005-08-03 10:26:50 +0100794 }
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100795
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100796 } else if (csize == 0) {
797 /*
798 * We checked the header CRC. If the node has no data, adjust
799 * the space accounting now. For other nodes this will be done
800 * later either when the node is marked obsolete or when its
801 * data is checked.
802 */
803 struct jffs2_eraseblock *jeb;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100804
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100805 dbg_readinode("the node has no data.\n");
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100806 jeb = &c->blocks[ref->flash_offset / c->sector_size];
807 len = ref_totlen(c, jeb, ref);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100808
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100809 spin_lock(&c->erase_completion_lock);
810 jeb->used_size += len;
811 jeb->unchecked_size -= len;
812 c->used_size += len;
813 c->unchecked_size -= len;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100814 ref->flash_offset = ref_offset(ref) | REF_NORMAL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100815 spin_unlock(&c->erase_completion_lock);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100816 }
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100817 }
818
819 tn->fn = jffs2_alloc_full_dnode();
820 if (!tn->fn) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100821 JFFS2_ERROR("alloc fn failed\n");
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100822 ret = -ENOMEM;
823 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100824 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000825
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100826 tn->version = je32_to_cpu(rd->version);
827 tn->fn->ofs = je32_to_cpu(rd->offset);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100828 tn->data_crc = je32_to_cpu(rd->data_crc);
829 tn->csize = csize;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100830 tn->fn->raw = ref;
David Woodhousedf8e96f2007-04-25 03:23:42 +0100831 tn->overlapped = 0;
832
833 if (tn->version > rii->highest_version)
834 rii->highest_version = tn->version;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000835
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100836 /* There was a bug where we wrote hole nodes out with
837 csize/dsize swapped. Deal with it */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100838 if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
839 tn->fn->size = csize;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100840 else // normal case...
841 tn->fn->size = je32_to_cpu(rd->dsize);
842
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100843 dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
Artem B. Bityutskiy280562b2005-08-17 15:57:43 +0100844 ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000845
David Woodhousedf8e96f2007-04-25 03:23:42 +0100846 ret = jffs2_add_tn_to_tree(c, rii, tn);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100847
David Woodhousedf8e96f2007-04-25 03:23:42 +0100848 if (ret) {
849 jffs2_free_full_dnode(tn->fn);
850 free_out:
851 jffs2_free_tmp_dnode_info(tn);
852 return ret;
853 }
854#ifdef JFFS2_DBG_READINODE_MESSAGES
855 dbg_readinode("After adding ver %d:\n", tn->version);
856 tn = tn_first(&rii->tn_root);
857 while (tn) {
858 dbg_readinode("%p: v %d r 0x%x-0x%x ov %d\n",
859 tn, tn->version, tn->fn->ofs,
860 tn->fn->ofs+tn->fn->size, tn->overlapped);
861 tn = tn_next(tn);
862 }
863#endif
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100864 return 0;
865}
866
867/*
868 * Helper function for jffs2_get_inode_nodes().
869 * It is called every time an unknown node is found.
870 *
David Woodhouse3877f0b2006-06-18 00:05:26 +0100871 * Returns: 0 on success;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100872 * 1 if the node should be marked obsolete;
873 * negative error code on failure.
874 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100875static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100876{
877 /* We don't mark unknown nodes as REF_UNCHECKED */
David Woodhousec7258a42007-03-09 11:44:00 +0000878 if (ref_flags(ref) == REF_UNCHECKED) {
879 JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n",
880 ref_offset(ref));
881 JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n",
882 je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
883 je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc));
David Woodhousedf8e96f2007-04-25 03:23:42 +0100884 jffs2_mark_node_obsolete(c, ref);
885 return 0;
David Woodhousec7258a42007-03-09 11:44:00 +0000886 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000887
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100888 un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
889
David Woodhouse3877f0b2006-06-18 00:05:26 +0100890 switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
891
892 case JFFS2_FEATURE_INCOMPAT:
893 JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
894 je16_to_cpu(un->nodetype), ref_offset(ref));
895 /* EEP */
896 BUG();
897 break;
898
899 case JFFS2_FEATURE_ROCOMPAT:
900 JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
901 je16_to_cpu(un->nodetype), ref_offset(ref));
902 BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
903 break;
904
905 case JFFS2_FEATURE_RWCOMPAT_COPY:
906 JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
907 je16_to_cpu(un->nodetype), ref_offset(ref));
908 break;
909
910 case JFFS2_FEATURE_RWCOMPAT_DELETE:
911 JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
912 je16_to_cpu(un->nodetype), ref_offset(ref));
David Woodhousedf8e96f2007-04-25 03:23:42 +0100913 jffs2_mark_node_obsolete(c, ref);
914 return 0;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100915 }
916
917 return 0;
918}
919
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100920/*
921 * Helper function for jffs2_get_inode_nodes().
922 * The function detects whether more data should be read and reads it if yes.
923 *
 * Returns: 0 on success;
925 * negative error code on failure.
926 */
927static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300928 int needed_len, int *rdlen, unsigned char *buf)
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100929{
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300930 int err, to_read = needed_len - *rdlen;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100931 size_t retlen;
932 uint32_t offs;
933
934 if (jffs2_is_writebuffered(c)) {
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300935 int rem = to_read % c->wbuf_pagesize;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100936
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300937 if (rem)
938 to_read += c->wbuf_pagesize - rem;
939 }
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100940
941 /* We need to read more data */
942 offs = ref_offset(ref) + *rdlen;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000943
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300944 dbg_readinode("read more %d bytes\n", to_read);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100945
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300946 err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100947 if (err) {
948 JFFS2_ERROR("can not read %d bytes from 0x%08x, "
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300949 "error code: %d.\n", to_read, offs, err);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100950 return err;
951 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000952
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300953 if (retlen < to_read) {
Randy Dunlapfb6a82c2006-04-11 20:12:10 -0400954 JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300955 offs, retlen, to_read);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100956 return -EIO;
957 }
958
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300959 *rdlen += to_read;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100960 return 0;
961}
962
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100963/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
David Woodhousedf8e96f2007-04-25 03:23:42 +0100964 with this ino. Perform a preliminary ordering on data nodes, throwing away
965 those which are completely obsoleted by newer ones. The naïve approach we
   used to take of just returning them _all_ in version order will cause us to
967 run out of memory in certain degenerate cases. */
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct jffs2_readinode_info *rii)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	unsigned char *buf = NULL;
	union jffs2_node_union *node;
	size_t retlen;
	int len, err;

	rii->mctime_ver = 0;

	dbg_readinode("ino #%u\n", f->inocache->ino);

	/* FIXME: in case of NOR and available ->point() this
	 * needs to be fixed. */
	/* Buffer is sized for the largest node header plus one write-buffer
	   page of slack, so the min-I/O-unit rounding below always fits. */
	len = sizeof(union jffs2_node_union) + c->wbuf_pagesize;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(&c->erase_completion_lock);
	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
	if (!valid_ref && f->inocache->ino != 1)
		JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
	while (valid_ref) {
		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/*
		 * At this point we don't know the type of the node we're going
		 * to read, so we do not know the size of its header. In order
		 * to minimize the amount of flash IO we assume the header is
		 * of size = JFFS2_MIN_NODE_HEADER.
		 */
		len = JFFS2_MIN_NODE_HEADER;
		if (jffs2_is_writebuffered(c)) {
			int end, rem;

			/*
			 * We are about to read JFFS2_MIN_NODE_HEADER bytes,
			 * but this flash has some minimal I/O unit. It is
			 * possible that we'll need to read more soon, so read
			 * up to the next min. I/O unit, in order not to
			 * re-read the same min. I/O unit twice.
			 */
			end = ref_offset(ref) + len;
			rem = end % c->wbuf_pagesize;
			if (rem)
				end += c->wbuf_pagesize - rem;
			len = end - ref_offset(ref);
		}

		dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)buf;

		/* No need to mask in the valid bit; it shouldn't be invalid */
		if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
			JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
				     ref_offset(ref), je16_to_cpu(node->u.magic),
				     je16_to_cpu(node->u.nodetype),
				     je32_to_cpu(node->u.totlen),
				     je32_to_cpu(node->u.hdr_crc));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}
		/* Due to poor choice of crc32 seed, an all-zero node will have a correct CRC */
		if (!je32_to_cpu(node->u.hdr_crc) && !je16_to_cpu(node->u.nodetype) &&
		    !je16_to_cpu(node->u.magic) && !je32_to_cpu(node->u.totlen)) {
			JFFS2_NOTICE("All zero node header at %#08x.\n", ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}

		/* Dispatch on node type; read_more() extends the buffer when
		   the full per-type header is larger than what was read. */
		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			/* NOTE(review): read_direntry() is passed 'retlen'
			   (bytes from the first read) rather than 'len' as
			   read_dnode() gets below — confirm intentional. */
			err = read_direntry(c, ref, &node->d, retlen, rii);
			if (unlikely(err))
				goto free_out;

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, len, rii);
			if (unlikely(err))
				goto free_out;

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

		}
	cont:
		/* Re-take the lock before picking the next node ref. */
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	kfree(buf);

	f->highest_version = rii->highest_version;

	dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
		      f->inocache->ino, rii->highest_version, rii->latest_mctime,
		      rii->mctime_ver);
	return 0;

 free_out:
	/* Error path: dump everything collected so far for this inode. */
	jffs2_free_tmp_dnode_info_list(&rii->tn_root);
	jffs2_free_full_dirent_list(rii->fds);
	rii->fds = NULL;
	kfree(buf);
	return err;
}
1131
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001132static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 struct jffs2_inode_info *f,
1134 struct jffs2_raw_inode *latest_node)
1135{
David Woodhousedf8e96f2007-04-25 03:23:42 +01001136 struct jffs2_readinode_info rii;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137 uint32_t crc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138 size_t retlen;
1139 int ret;
1140
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001141 dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142
David Woodhousedf8e96f2007-04-25 03:23:42 +01001143 memset(&rii, 0, sizeof(rii));
1144
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145 /* Grab all nodes relevant to this ino */
David Woodhousedf8e96f2007-04-25 03:23:42 +01001146 ret = jffs2_get_inode_nodes(c, f, &rii);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147
1148 if (ret) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001149 JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150 if (f->inocache->state == INO_STATE_READING)
1151 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1152 return ret;
1153 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154
David Woodhousedf8e96f2007-04-25 03:23:42 +01001155 ret = jffs2_build_inode_fragtree(c, f, &rii);
1156 if (ret) {
1157 JFFS2_ERROR("Failed to build final fragtree for inode #%u: error %d\n",
1158 f->inocache->ino, ret);
1159 if (f->inocache->state == INO_STATE_READING)
1160 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1161 jffs2_free_tmp_dnode_info_list(&rii.tn_root);
1162 /* FIXME: We could at least crc-check them all */
1163 if (rii.mdata_tn) {
1164 jffs2_free_full_dnode(rii.mdata_tn->fn);
1165 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1166 rii.mdata_tn = NULL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001167 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001168 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001170
1171 if (rii.mdata_tn) {
1172 if (rii.mdata_tn->fn->raw == rii.latest_ref) {
1173 f->metadata = rii.mdata_tn->fn;
1174 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1175 } else {
1176 jffs2_kill_tn(c, rii.mdata_tn);
1177 }
1178 rii.mdata_tn = NULL;
1179 }
1180
1181 f->dents = rii.fds;
1182
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +01001183 jffs2_dbg_fragtree_paranoia_check_nolock(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184
David Woodhousedf8e96f2007-04-25 03:23:42 +01001185 if (unlikely(!rii.latest_ref)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 /* No data nodes for this inode. */
1187 if (f->inocache->ino != 1) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001188 JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
David Woodhousedf8e96f2007-04-25 03:23:42 +01001189 if (!rii.fds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 if (f->inocache->state == INO_STATE_READING)
1191 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1192 return -EIO;
1193 }
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001194 JFFS2_NOTICE("but it has children so we fake some modes for it\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 }
1196 latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
1197 latest_node->version = cpu_to_je32(0);
1198 latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
1199 latest_node->isize = cpu_to_je32(0);
1200 latest_node->gid = cpu_to_je16(0);
1201 latest_node->uid = cpu_to_je16(0);
1202 if (f->inocache->state == INO_STATE_READING)
1203 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
1204 return 0;
1205 }
1206
David Woodhousedf8e96f2007-04-25 03:23:42 +01001207 ret = jffs2_flash_read(c, ref_offset(rii.latest_ref), sizeof(*latest_node), &retlen, (void *)latest_node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 if (ret || retlen != sizeof(*latest_node)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001209 JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
1210 ret, retlen, sizeof(*latest_node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 /* FIXME: If this fails, there seems to be a memory leak. Find it. */
1212 up(&f->sem);
1213 jffs2_do_clear_inode(c, f);
1214 return ret?ret:-EIO;
1215 }
1216
1217 crc = crc32(0, latest_node, sizeof(*latest_node)-8);
1218 if (crc != je32_to_cpu(latest_node->node_crc)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001219 JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
David Woodhousedf8e96f2007-04-25 03:23:42 +01001220 f->inocache->ino, ref_offset(rii.latest_ref));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 up(&f->sem);
1222 jffs2_do_clear_inode(c, f);
1223 return -EIO;
1224 }
1225
1226 switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
1227 case S_IFDIR:
David Woodhousedf8e96f2007-04-25 03:23:42 +01001228 if (rii.mctime_ver > je32_to_cpu(latest_node->version)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 /* The times in the latest_node are actually older than
1230 mctime in the latest dirent. Cheat. */
David Woodhousedf8e96f2007-04-25 03:23:42 +01001231 latest_node->ctime = latest_node->mtime = cpu_to_je32(rii.latest_mctime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232 }
1233 break;
1234
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001235
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 case S_IFREG:
1237 /* If it was a regular file, truncate it to the latest node's isize */
Artem B. Bityutskiyf302cd02005-07-24 16:29:59 +01001238 jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 break;
1240
1241 case S_IFLNK:
1242 /* Hack to work around broken isize in old symlink code.
1243 Remove this when dwmw2 comes to his senses and stops
1244 symlinks from being an entirely gratuitous special
1245 case. */
1246 if (!je32_to_cpu(latest_node->isize))
1247 latest_node->isize = latest_node->dsize;
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001248
1249 if (f->inocache->state != INO_STATE_CHECKING) {
1250 /* Symlink's inode data is the target path. Read it and
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001251 * keep in RAM to facilitate quick follow symlink
1252 * operation. */
1253 f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
1254 if (!f->target) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001255 JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001256 up(&f->sem);
1257 jffs2_do_clear_inode(c, f);
1258 return -ENOMEM;
1259 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001260
David Woodhousedf8e96f2007-04-25 03:23:42 +01001261 ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node),
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001262 je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001263
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001264 if (ret || retlen != je32_to_cpu(latest_node->csize)) {
1265 if (retlen != je32_to_cpu(latest_node->csize))
1266 ret = -EIO;
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001267 kfree(f->target);
1268 f->target = NULL;
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001269 up(&f->sem);
1270 jffs2_do_clear_inode(c, f);
1271 return -ret;
1272 }
1273
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001274 f->target[je32_to_cpu(latest_node->csize)] = '\0';
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001275 dbg_readinode("symlink's target '%s' cached\n", f->target);
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001276 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001277
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 /* fall through... */
1279
1280 case S_IFBLK:
1281 case S_IFCHR:
1282 /* Certain inode types should have only one data node, and it's
1283 kept as the metadata node */
1284 if (f->metadata) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001285 JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 f->inocache->ino, jemode_to_cpu(latest_node->mode));
1287 up(&f->sem);
1288 jffs2_do_clear_inode(c, f);
1289 return -EIO;
1290 }
1291 if (!frag_first(&f->fragtree)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001292 JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 f->inocache->ino, jemode_to_cpu(latest_node->mode));
1294 up(&f->sem);
1295 jffs2_do_clear_inode(c, f);
1296 return -EIO;
1297 }
1298 /* ASSERT: f->fraglist != NULL */
1299 if (frag_next(frag_first(&f->fragtree))) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001300 JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 f->inocache->ino, jemode_to_cpu(latest_node->mode));
1302 /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
1303 up(&f->sem);
1304 jffs2_do_clear_inode(c, f);
1305 return -EIO;
1306 }
1307 /* OK. We're happy */
1308 f->metadata = frag_first(&f->fragtree)->node;
1309 jffs2_free_node_frag(frag_first(&f->fragtree));
1310 f->fragtree = RB_ROOT;
1311 break;
1312 }
1313 if (f->inocache->state == INO_STATE_READING)
1314 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
1315
1316 return 0;
1317}
1318
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001319/* Scan the list of all nodes present for this ino, build map of versions, etc. */
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001320int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001321 uint32_t ino, struct jffs2_raw_inode *latest_node)
1322{
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001323 dbg_readinode("read inode #%u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001324
1325 retry_inocache:
1326 spin_lock(&c->inocache_lock);
1327 f->inocache = jffs2_get_ino_cache(c, ino);
1328
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001329 if (f->inocache) {
1330 /* Check its state. We may need to wait before we can use it */
1331 switch(f->inocache->state) {
1332 case INO_STATE_UNCHECKED:
1333 case INO_STATE_CHECKEDABSENT:
1334 f->inocache->state = INO_STATE_READING;
1335 break;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001336
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001337 case INO_STATE_CHECKING:
1338 case INO_STATE_GC:
1339 /* If it's in either of these states, we need
1340 to wait for whoever's got it to finish and
1341 put it back. */
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001342 dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001343 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
1344 goto retry_inocache;
1345
1346 case INO_STATE_READING:
1347 case INO_STATE_PRESENT:
1348 /* Eep. This should never happen. It can
1349 happen if Linux calls read_inode() again
1350 before clear_inode() has finished though. */
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001351 JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001352 /* Fail. That's probably better than allowing it to succeed */
1353 f->inocache = NULL;
1354 break;
1355
1356 default:
1357 BUG();
1358 }
1359 }
1360 spin_unlock(&c->inocache_lock);
1361
1362 if (!f->inocache && ino == 1) {
1363 /* Special case - no root inode on medium */
1364 f->inocache = jffs2_alloc_inode_cache();
1365 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001366 JFFS2_ERROR("cannot allocate inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001367 return -ENOMEM;
1368 }
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001369 dbg_readinode("creating inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001370 memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
1371 f->inocache->ino = f->inocache->nlink = 1;
1372 f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
1373 f->inocache->state = INO_STATE_READING;
1374 jffs2_add_ino_cache(c, f->inocache);
1375 }
1376 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001377 JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001378 return -ENOENT;
1379 }
1380
1381 return jffs2_do_read_inode_internal(c, f, latest_node);
1382}
1383
1384int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
1385{
1386 struct jffs2_raw_inode n;
Yan Burman3d375d92006-12-04 15:03:01 -08001387 struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001388 int ret;
1389
1390 if (!f)
1391 return -ENOMEM;
1392
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001393 init_MUTEX_LOCKED(&f->sem);
1394 f->inocache = ic;
1395
1396 ret = jffs2_do_read_inode_internal(c, f, &n);
1397 if (!ret) {
1398 up(&f->sem);
1399 jffs2_do_clear_inode(c, f);
1400 }
1401 kfree (f);
1402 return ret;
1403}
1404
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
1406{
1407 struct jffs2_full_dirent *fd, *fds;
1408 int deleted;
1409
KaiGai Koheic7afb0f2006-07-02 15:13:46 +01001410 jffs2_clear_acl(f);
KaiGai Kohei355ed4e2006-06-24 09:15:36 +09001411 jffs2_xattr_delete_inode(c, f->inocache);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 down(&f->sem);
1413 deleted = f->inocache && !f->inocache->nlink;
1414
David Woodhouse67e345d2005-02-27 23:01:36 +00001415 if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
1416 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);
1417
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 if (f->metadata) {
1419 if (deleted)
1420 jffs2_mark_node_obsolete(c, f->metadata->raw);
1421 jffs2_free_full_dnode(f->metadata);
1422 }
1423
1424 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
1425
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001426 if (f->target) {
1427 kfree(f->target);
1428 f->target = NULL;
1429 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001430
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001431 fds = f->dents;
1432 while(fds) {
1433 fd = fds;
1434 fds = fd->next;
1435 jffs2_free_full_dirent(fd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 }
1437
David Woodhouse67e345d2005-02-27 23:01:36 +00001438 if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
David Woodhouse67e345d2005-02-27 23:01:36 +00001440 if (f->inocache->nodes == (void *)f->inocache)
1441 jffs2_del_ino_cache(c, f->inocache);
1442 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443
1444 up(&f->sem);
1445}