/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/crc32.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include "nodelist.h"

/*
 * Check the data CRC of the node.
 *
 * Returns: 0 if the data CRC is correct;
 *	    1 if it is incorrect;
 *	    an error code if an error occurred.
 */
static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	struct jffs2_raw_node_ref *ref = tn->fn->raw;
	int err = 0, pointed = 0;
	struct jffs2_eraseblock *jeb;
	unsigned char *buffer;
	uint32_t crc, ofs, len;
	size_t retlen;

	BUG_ON(tn->csize == 0);

	if (!jffs2_is_writebuffered(c))
		goto adj_acc;

	/* Calculate how many bytes were already checked */
	ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
	len = ofs % c->wbuf_pagesize;
	if (likely(len))
		len = c->wbuf_pagesize - len;
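	/*
	 * Illustrative example (made-up numbers): with c->wbuf_pagesize == 512
	 * and ofs == 4660, ofs % 512 == 52, so len becomes 512 - 52 == 460:
	 * the first 460 bytes of data sit in the same write-buffer page as the
	 * node header and were already covered by tn->partial_crc.
	 */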

	if (len >= tn->csize) {
		dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
			      ref_offset(ref), tn->csize, ofs);
		goto adj_acc;
	}

	ofs += len;
	len = tn->csize - len;

	dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
		      ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);

#ifndef __ECOS
	/* TODO: instead, encapsulate the point() stuff in jffs2_flash_read(),
	 * adding a jffs2_flash_read_end() interface. */
	if (c->mtd->point) {
		err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
		if (!err && retlen < tn->csize) {
			JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
			c->mtd->unpoint(c->mtd, buffer, ofs, len);
		} else if (err)
			JFFS2_WARNING("MTD point failed: error code %d.\n", err);
		else
			pointed = 1; /* successfully pointed to device */
	}
#endif

	if (!pointed) {
		buffer = kmalloc(len, GFP_KERNEL);
		if (unlikely(!buffer))
			return -ENOMEM;

		/* TODO: this is a very frequent pattern, make it a separate
		 * routine */
		err = jffs2_flash_read(c, ofs, len, &retlen, buffer);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
			goto free_out;
		}

		if (retlen != len) {
			JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
			err = -EIO;
			goto free_out;
		}
	}

	/* Continue calculating CRC */
	crc = crc32(tn->partial_crc, buffer, len);
	if (!pointed)
		kfree(buffer);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, buffer, ofs, len);
#endif

	if (crc != tn->data_crc) {
		JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
			     ofs, tn->data_crc, crc);
		return 1;
	}

adj_acc:
	jeb = &c->blocks[ref->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, ref);
	/* If it should be REF_NORMAL, it'll get marked as such when
	   we build the fragtree, shortly. No need to worry about GC
	   moving it while it's marked REF_PRISTINE -- GC won't happen
	   till we've finished checking every inode anyway. */
	ref->flash_offset |= REF_PRISTINE;
	/*
	 * Mark the node as having been checked and fix the
	 * accounting accordingly.
	 */
	spin_lock(&c->erase_completion_lock);
	jeb->used_size += len;
	jeb->unchecked_size -= len;
	c->used_size += len;
	c->unchecked_size -= len;
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
	spin_unlock(&c->erase_completion_lock);

	return 0;

free_out:
	if (!pointed)
		kfree(buffer);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, buffer, ofs, len);
#endif
	return err;
}

/*
 * Helper function for jffs2_add_tn_to_tree() and jffs2_build_inode_fragtree().
 *
 * Checks the data CRC of the node if it is still marked REF_UNCHECKED.
 */
static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	int ret;

	BUG_ON(ref_obsolete(tn->fn->raw));

	/* We only check the data CRC of unchecked nodes */
	if (ref_flags(tn->fn->raw) != REF_UNCHECKED)
		return 0;

	dbg_readinode("check node %#04x-%#04x, phys offs %#08x\n",
		      tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw));

	ret = check_node_data(c, tn);
	if (unlikely(ret < 0)) {
		JFFS2_ERROR("check_node_data() returned error: %d.\n",
			    ret);
	} else if (unlikely(ret > 0)) {
		dbg_readinode("CRC error, mark it obsolete.\n");
		jffs2_mark_node_obsolete(c, tn->fn->raw);
	}

	return ret;
}

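/*
 * Note: in the descent below the final 'else break' can never be reached,
 * since the two tests above it already cover every case, so the loop always
 * walks down to a leaf and returns the last node visited -- a node adjacent
 * to where a node at 'offset' would be inserted.  Callers are expected to
 * step back over 'overlapped' nodes (as jffs2_add_tn_to_tree() does) to reach
 * the earliest node that may be relevant.
 */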
static struct jffs2_tmp_dnode_info *jffs2_lookup_tn(struct rb_root *tn_root, uint32_t offset)
{
	struct rb_node *next;
	struct jffs2_tmp_dnode_info *tn = NULL;

	dbg_readinode("root %p, offset %d\n", tn_root, offset);

	next = tn_root->rb_node;

	while (next) {
		tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb);

		if (tn->fn->ofs < offset)
			next = tn->rb.rb_right;
		else if (tn->fn->ofs >= offset)
			next = tn->rb.rb_left;
		else
			break;
	}

	return tn;
}


static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	jffs2_mark_node_obsolete(c, tn->fn->raw);
	jffs2_free_full_dnode(tn->fn);
	jffs2_free_tmp_dnode_info(tn);
}

/*
 * This function is used when we read an inode. Data nodes arrive in
 * arbitrary order -- they may be older or newer than the nodes which
 * are already in the tree. Where overlaps occur, the older node can
 * be discarded as long as the newer passes the CRC check. We don't
 * bother to keep track of holes in this rbtree, and neither do we deal
 * with frags -- we can have multiple entries starting at the same
 * offset, and the one with the smallest length will come first in the
 * ordering.
 *
 * Returns	0 if the node was inserted
 *		1 if the node is obsolete (because we can't mark it so yet)
 *		< 0 if an error occurred
 */
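/*
 * For instance (hypothetical numbers): two nodes that both start at offset
 * 0x400, one covering 0x100 bytes and one covering 0x200 bytes, may sit in
 * the tree at the same time; the 0x100-byte one sorts first, because the
 * insertion comparator below sends an equal offset with a smaller size to
 * the left.
 */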
static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
				struct jffs2_readinode_info *rii,
				struct jffs2_tmp_dnode_info *tn)
{
	uint32_t fn_end = tn->fn->ofs + tn->fn->size;
	struct jffs2_tmp_dnode_info *this;

	dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw));

	/* If a node has zero dsize, we only have to keep it if it might be the
	   node with the highest version -- i.e. the one which will end up as
	   f->metadata. Note that such nodes won't be REF_UNCHECKED since there
	   is no data to check anyway. */
	if (!tn->fn->size) {
		if (rii->mdata_tn) {
			/* We had a candidate mdata node already */
			dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version);
			jffs2_kill_tn(c, rii->mdata_tn);
		}
		rii->mdata_tn = tn;
		dbg_readinode("keep new mdata with ver %d\n", tn->version);
		return 0;
	}

	/* Find the earliest node which _may_ be relevant to this one */
	this = jffs2_lookup_tn(&rii->tn_root, tn->fn->ofs);
	if (this) {
		/* If the node is coincident with another at a lower address,
		   back up until the other node is found. It may be relevant */
		while (this->overlapped)
			this = tn_prev(this);

		/* First node should never be marked overlapped */
		BUG_ON(!this);
		dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");
	}

	while (this) {
		if (this->fn->ofs > fn_end)
			break;
		dbg_readinode("Ponder this ver %d, 0x%x-0x%x\n",
			      this->version, this->fn->ofs, this->fn->size);

		if (this->version == tn->version) {
			/* Version number collision means REF_PRISTINE GC. Accept either of them
			   as long as the CRC is correct. Check the one we have already... */
			if (!check_tn_node(c, this)) {
				/* The one we already had was OK. Keep it and throw away the new one */
				dbg_readinode("Like old node. Throw away new\n");
				jffs2_kill_tn(c, tn);
				return 0;
			} else {
				/* Who cares if the new one is good; keep it for now anyway. */
				dbg_readinode("Like new node. Throw away old\n");
				rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
				jffs2_kill_tn(c, this);
				/* Same overlapping from in front and behind */
				return 0;
			}
		}
		if (this->version < tn->version &&
		    this->fn->ofs >= tn->fn->ofs &&
		    this->fn->ofs + this->fn->size <= fn_end) {
			/* New node entirely overlaps 'this' */
			if (check_tn_node(c, tn)) {
				dbg_readinode("new node bad CRC\n");
				jffs2_kill_tn(c, tn);
				return 0;
			}
			/* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */
			while (this && this->fn->ofs + this->fn->size <= fn_end) {
				struct jffs2_tmp_dnode_info *next = tn_next(this);
				if (this->version < tn->version) {
					tn_erase(this, &rii->tn_root);
					dbg_readinode("Kill overlapped ver %d, 0x%x-0x%x\n",
						      this->version, this->fn->ofs,
						      this->fn->ofs + this->fn->size);
					jffs2_kill_tn(c, this);
				}
				this = next;
			}
			dbg_readinode("Done killing overlapped nodes\n");
			continue;
		}
		if (this->version > tn->version &&
		    this->fn->ofs <= tn->fn->ofs &&
		    this->fn->ofs + this->fn->size >= fn_end) {
			/* New node entirely overlapped by 'this' */
			if (!check_tn_node(c, this)) {
				dbg_readinode("Good CRC on old node. Kill new\n");
				jffs2_kill_tn(c, tn);
				return 0;
			}
			/* ... but 'this' was bad. Replace it... */
			dbg_readinode("Bad CRC on old overlapping node. Kill it\n");
			tn_erase(this, &rii->tn_root);
			jffs2_kill_tn(c, this);
			break;
		}

		this = tn_next(this);
	}

	/* We neither completely obsoleted an earlier node nor were completely
	   obsoleted by one. Insert the new node into the tree */
	{
		struct rb_node *parent;
		struct rb_node **link = &rii->tn_root.rb_node;
		struct jffs2_tmp_dnode_info *insert_point = NULL;

		while (*link) {
			parent = *link;
			insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
			if (tn->fn->ofs > insert_point->fn->ofs)
				link = &insert_point->rb.rb_right;
			else if (tn->fn->ofs < insert_point->fn->ofs ||
				 tn->fn->size < insert_point->fn->size)
				link = &insert_point->rb.rb_left;
			else
				link = &insert_point->rb.rb_right;
		}
		rb_link_node(&tn->rb, &insert_point->rb, link);
		rb_insert_color(&tn->rb, &rii->tn_root);
	}

	/* If there's anything behind that overlaps us, note it */
	this = tn_prev(tn);
	if (this) {
		while (1) {
			if (this->fn->ofs + this->fn->size > tn->fn->ofs) {
				dbg_readinode("Node is overlapped by %p (v %d, 0x%x-0x%x)\n",
					      this, this->version, this->fn->ofs,
					      this->fn->ofs + this->fn->size);
				tn->overlapped = 1;
				break;
			}
			if (!this->overlapped)
				break;
			this = tn_prev(this);
		}
	}

	/* If the new node overlaps anything ahead, note it */
	this = tn_next(tn);
	while (this && this->fn->ofs < fn_end) {
		this->overlapped = 1;
		dbg_readinode("Node ver %d, 0x%x-0x%x is overlapped\n",
			      this->version, this->fn->ofs,
			      this->fn->ofs + this->fn->size);
		this = tn_next(this);
	}
	return 0;
}

/* Trivial function to remove the last node in the tree. That node, by
   definition, has no right-hand child -- so it can be removed just by making
   its only child (if any) take its place under its parent. */
static void eat_last(struct rb_root *root, struct rb_node *node)
{
	struct rb_node *parent = rb_parent(node);
	struct rb_node **link;

	/* LAST! */
	BUG_ON(node->rb_right);

	if (!parent)
		link = &root->rb_node;
	else if (node == parent->rb_left)
		link = &parent->rb_left;
	else
		link = &parent->rb_right;

	*link = node->rb_left;
	/* Colour doesn't matter now. Only the parent pointer. */
	if (node->rb_left)
		node->rb_left->rb_parent_color = node->rb_parent_color;
}

/* We put the version tree in reverse order, so we can just use eat_last */
static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn)
{
	struct rb_node **link = &ver_root->rb_node;
	struct rb_node *parent = NULL;
	struct jffs2_tmp_dnode_info *this_tn;

	while (*link) {
		parent = *link;
		this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);

		if (tn->version > this_tn->version)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	dbg_readinode("Link new node at %p (root is %p)\n", link, ver_root);
	rb_link_node(&tn->rb, parent, link);
	rb_insert_color(&tn->rb, ver_root);
}

/* Build the final, normal fragtree from the tn tree. It doesn't matter in
   which order we add nodes to the real fragtree, as long as they don't
   overlap. And having thrown away the majority of overlapped nodes as we
   went, there really shouldn't be many sets of nodes which do overlap. If we
   start at the end, we can use the overlap markers -- we can just eat nodes
   which aren't overlapped, and when we encounter nodes which _do_ overlap we
   sort them all into a temporary tree in version order before replaying them. */
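/*
 * Roughly: nodes are pulled off the tail of the tn tree with eat_last() and
 * parked in ver_root; once a node that is not marked 'overlapped' is reached,
 * the parked group forms one overlapping cluster, which is then replayed into
 * the real fragtree in version order, CRC-checking each node along the way,
 * so the fragtree ends up describing the newest valid data for each range.
 */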
static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
				      struct jffs2_inode_info *f,
				      struct jffs2_readinode_info *rii)
{
	struct jffs2_tmp_dnode_info *pen, *last, *this;
	struct rb_root ver_root = RB_ROOT;
	uint32_t high_ver = 0;

	if (rii->mdata_tn) {
		dbg_readinode("potential mdata is ver %d at %p\n", rii->mdata_tn->version, rii->mdata_tn);
		high_ver = rii->mdata_tn->version;
		rii->latest_ref = rii->mdata_tn->fn->raw;
	}
#ifdef JFFS2_DBG_READINODE_MESSAGES
	this = tn_last(&rii->tn_root);
	while (this) {
		dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs,
			      this->fn->ofs+this->fn->size, this->overlapped);
		this = tn_prev(this);
	}
#endif
	pen = tn_last(&rii->tn_root);
	while ((last = pen)) {
		pen = tn_prev(last);

		eat_last(&rii->tn_root, &last->rb);
		ver_insert(&ver_root, last);

		if (unlikely(last->overlapped))
			continue;

		/* Now we have a bunch of nodes in reverse version
		   order, in the tree at ver_root. Most of the time,
		   there'll actually be only one node in the 'tree',
		   in fact. */
		this = tn_last(&ver_root);

		while (this) {
			struct jffs2_tmp_dnode_info *vers_next;
			int ret;
			vers_next = tn_prev(this);
			eat_last(&ver_root, &this->rb);
			if (check_tn_node(c, this)) {
				dbg_readinode("node ver %d, 0x%x-0x%x failed CRC\n",
					      this->version, this->fn->ofs,
					      this->fn->ofs+this->fn->size);
				jffs2_kill_tn(c, this);
			} else {
				if (this->version > high_ver) {
					/* Note that this is different from the other
					   highest_version, because this one is only
					   counting _valid_ nodes which could give the
					   latest inode metadata */
					high_ver = this->version;
					rii->latest_ref = this->fn->raw;
				}
				dbg_readinode("Add %p (v %d, 0x%x-0x%x, ov %d) to fragtree\n",
					      this, this->version, this->fn->ofs,
					      this->fn->ofs+this->fn->size, this->overlapped);

				ret = jffs2_add_full_dnode_to_inode(c, f, this->fn);
				if (ret) {
					/* Free the nodes in ver_root; let the caller
					   deal with the rest */
					JFFS2_ERROR("Add node to tree failed %d\n", ret);
					while (1) {
						vers_next = tn_prev(this);
						if (check_tn_node(c, this))
							jffs2_mark_node_obsolete(c, this->fn->raw);
						jffs2_free_full_dnode(this->fn);
						jffs2_free_tmp_dnode_info(this);
						this = vers_next;
						if (!this)
							break;
						eat_last(&ver_root, &vers_next->rb);
					}
					return ret;
				}
				jffs2_free_tmp_dnode_info(this);
			}
			this = vers_next;
		}
	}
	return 0;
}

static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
	struct rb_node *this;
	struct jffs2_tmp_dnode_info *tn;

	this = list->rb_node;

	/* Now at bottom of tree */
	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
			jffs2_free_full_dnode(tn->fn);
			jffs2_free_tmp_dnode_info(tn);

			this = rb_parent(this);
			if (!this)
				break;

			if (this->rb_left == &tn->rb)
				this->rb_left = NULL;
			else if (this->rb_right == &tn->rb)
				this->rb_right = NULL;
			else BUG();
		}
	}
	list->rb_node = NULL;
}

static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent *next;

	while (fd) {
		next = fd->next;
		jffs2_free_full_dirent(fd);
		fd = next;
	}
}

/* Returns first valid node after 'ref'. May return 'ref' */
static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
	while (ref && ref->next_in_ino) {
		if (!ref_obsolete(ref))
			return ref;
		dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
		ref = ref->next_in_ino;
	}
	return NULL;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
				struct jffs2_raw_dirent *rd, size_t read,
				struct jffs2_readinode_info *rii)
{
	struct jffs2_full_dirent *fd;
	uint32_t crc;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	crc = crc32(0, rd, sizeof(*rd) - 8);
	if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
		JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
			     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {
		struct jffs2_eraseblock *jeb;
		int len;

		/* Sanity check */
		if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
				    ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
			jffs2_mark_node_obsolete(c, ref);
			return 0;
		}

		jeb = &c->blocks[ref->flash_offset / c->sector_size];
		len = ref_totlen(c, jeb, ref);

		spin_lock(&c->erase_completion_lock);
		jeb->used_size += len;
		jeb->unchecked_size -= len;
		c->used_size += len;
		c->unchecked_size -= len;
		ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
		spin_unlock(&c->erase_completion_lock);
	}

	fd = jffs2_alloc_full_dirent(rd->nsize + 1);
	if (unlikely(!fd))
		return -ENOMEM;

	fd->raw = ref;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->type = rd->type;

	if (fd->version > rii->highest_version)
		rii->highest_version = fd->version;

	/* Pick out the mctime of the latest dirent */
	if (fd->version > rii->mctime_ver && je32_to_cpu(rd->mctime)) {
		rii->mctime_ver = fd->version;
		rii->latest_mctime = je32_to_cpu(rd->mctime);
	}

	/*
	 * Copy as much of the name as possible from the raw
	 * dirent we've already read from the flash.
	 */
	if (read > sizeof(*rd))
		memcpy(&fd->name[0], &rd->name[0],
		       min_t(uint32_t, rd->nsize, (read - sizeof(*rd))));

	/* Do we need to copy any more of the name directly from the flash? */
	if (rd->nsize + sizeof(*rd) > read) {
		/* FIXME: point() */
		int err;
		int already = read - sizeof(*rd);

		err = jffs2_flash_read(c, (ref_offset(ref)) + read,
				       rd->nsize - already, &read, &fd->name[already]);
		if (unlikely(read != rd->nsize - already) && likely(!err))
			return -EIO;

		if (unlikely(err)) {
			JFFS2_ERROR("read remainder of name: error %d\n", err);
			jffs2_free_full_dirent(fd);
			return -EIO;
		}
	}

	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->next = NULL;
	fd->name[rd->nsize] = '\0';

	/*
	 * Wheee. We now have a complete jffs2_full_dirent structure, with
	 * the name in it and everything. Link it into the list
	 */
	jffs2_add_fd_to_list(c, fd, &rii->fds);

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
			     struct jffs2_raw_inode *rd, int rdlen,
			     struct jffs2_readinode_info *rii)
{
	struct jffs2_tmp_dnode_info *tn;
	uint32_t len, csize;
	int ret = 1;
	uint32_t crc;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	crc = crc32(0, rd, sizeof(*rd) - 8);
	if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
		JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
			     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	tn = jffs2_alloc_tmp_dnode_info();
	if (!tn) {
		JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
		return -ENOMEM;
	}

	tn->partial_crc = 0;
	csize = je32_to_cpu(rd->csize);

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {

		/* Sanity checks */
		if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
		    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			goto free_out;
		}

		if (jffs2_is_writebuffered(c) && csize != 0) {
			/* At this point we are supposed to check the data CRC
			 * of our unchecked node. But thus far, we do not
			 * know whether the node is valid or obsolete. To
			 * figure this out, we need to walk all the nodes of
			 * the inode and build the inode fragtree. We don't
			 * want to spend time checking data of nodes which may
			 * later be found to be obsolete. So we put off the full
			 * data CRC checking until we have read all the inode
			 * nodes and have started building the fragtree.
			 *
			 * The fragtree is built starting with the nodes
			 * having the highest version number, so we'll be able
			 * to detect whether a node is valid (i.e., it is not
			 * overlapped by a node with a higher version) or not.
			 * And we'll be able to check only those nodes which
			 * are not obsolete.
			 *
			 * Of course, this optimization only makes sense in the
			 * case of NAND flashes (or other flashes with
			 * !jffs2_can_mark_obsolete()), since on NOR flashes
			 * nodes are marked obsolete physically.
			 *
			 * Since NAND flashes (or other flashes with
			 * jffs2_is_writebuffered(c)) are anyway read in
			 * fractions of c->wbuf_pagesize, and we have just read
			 * the node header, it is likely that the starting part
			 * of the node data was also read when we read the
			 * header. So we don't mind checking the CRC of the
			 * starting part of the node's data now, and checking
			 * the second part later (in check_node_data()).
			 * Of course, we will not need to re-read and re-check
			 * the NAND page which we have just read. This is why we
			 * read the whole NAND page at jffs2_get_inode_nodes(),
			 * while we needed only the node header.
			 */
			unsigned char *buf;

			/* 'buf' will point to the start of data */
			buf = (unsigned char *)rd + sizeof(*rd);
			/* len will be the read data length */
			len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
			tn->partial_crc = crc32(0, buf, len);
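			/*
			 * tn->partial_crc above covers only whatever part of
			 * the data happened to be fetched along with the
			 * header; check_node_data() later resumes the CRC
			 * from this partial value over the remaining bytes.
			 */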

			dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);

			/* If we actually calculated the whole data CRC
			 * and it is wrong, drop the node. */
			if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
				JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
					     ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
				goto free_out;
			}

		} else if (csize == 0) {
			/*
			 * We checked the header CRC. If the node has no data, adjust
			 * the space accounting now. For other nodes this will be done
			 * later either when the node is marked obsolete or when its
			 * data is checked.
			 */
			struct jffs2_eraseblock *jeb;

			dbg_readinode("the node has no data.\n");
			jeb = &c->blocks[ref->flash_offset / c->sector_size];
			len = ref_totlen(c, jeb, ref);

			spin_lock(&c->erase_completion_lock);
			jeb->used_size += len;
			jeb->unchecked_size -= len;
			c->used_size += len;
			c->unchecked_size -= len;
			ref->flash_offset = ref_offset(ref) | REF_NORMAL;
			spin_unlock(&c->erase_completion_lock);
		}
	}

	tn->fn = jffs2_alloc_full_dnode();
	if (!tn->fn) {
		JFFS2_ERROR("alloc fn failed\n");
		ret = -ENOMEM;
		goto free_out;
	}

	tn->version = je32_to_cpu(rd->version);
	tn->fn->ofs = je32_to_cpu(rd->offset);
	tn->data_crc = je32_to_cpu(rd->data_crc);
	tn->csize = csize;
	tn->fn->raw = ref;
	tn->overlapped = 0;

	if (tn->version > rii->highest_version)
		rii->highest_version = tn->version;

	/* There was a bug where we wrote hole nodes out with
	   csize/dsize swapped. Deal with it */
	if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
		tn->fn->size = csize;
	else /* normal case... */
		tn->fn->size = je32_to_cpu(rd->dsize);

	dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
		      ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);

	ret = jffs2_add_tn_to_tree(c, rii, tn);

	if (ret) {
		jffs2_free_full_dnode(tn->fn);
	free_out:
		jffs2_free_tmp_dnode_info(tn);
		return ret;
	}
#ifdef JFFS2_DBG_READINODE_MESSAGES
	dbg_readinode("After adding ver %d:\n", je32_to_cpu(rd->version));
	tn = tn_first(&rii->tn_root);
	while (tn) {
		dbg_readinode("%p: v %d r 0x%x-0x%x ov %d\n",
			      tn, tn->version, tn->fn->ofs,
			      tn->fn->ofs+tn->fn->size, tn->overlapped);
		tn = tn_next(tn);
	}
#endif
	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an unknown node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
{
	/* We don't mark unknown nodes as REF_UNCHECKED */
	if (ref_flags(ref) == REF_UNCHECKED) {
		JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n",
			    ref_offset(ref));
		JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n",
			    je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
			    je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc));
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));

	switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {

	case JFFS2_FEATURE_INCOMPAT:
		JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
			    je16_to_cpu(un->nodetype), ref_offset(ref));
		/* EEP */
		BUG();
		break;

	case JFFS2_FEATURE_ROCOMPAT:
		JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
			    je16_to_cpu(un->nodetype), ref_offset(ref));
		BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
		break;

	case JFFS2_FEATURE_RWCOMPAT_COPY:
		JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
			     je16_to_cpu(un->nodetype), ref_offset(ref));
		break;

	case JFFS2_FEATURE_RWCOMPAT_DELETE:
		JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
			     je16_to_cpu(un->nodetype), ref_offset(ref));
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It detects whether more data should be read and, if so, reads it.
 *
 * Returns: 0 on success;
 *	    negative error code on failure.
 */
static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
		     int needed_len, int *rdlen, unsigned char *buf)
{
	int err, to_read = needed_len - *rdlen;
	size_t retlen;
	uint32_t offs;

	if (jffs2_is_writebuffered(c)) {
		int rem = to_read % c->wbuf_pagesize;

		if (rem)
			to_read += c->wbuf_pagesize - rem;
	}
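	/*
	 * Illustrative example (made-up numbers): with to_read == 68 and
	 * c->wbuf_pagesize == 64, rem == 4, so to_read is rounded up to 128 --
	 * on write-buffered flash we always read a whole number of
	 * write-buffer pages.
	 */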

	/* We need to read more data */
	offs = ref_offset(ref) + *rdlen;

	dbg_readinode("read more %d bytes\n", to_read);

	err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen);
	if (err) {
		JFFS2_ERROR("can not read %d bytes from 0x%08x, "
			    "error code: %d.\n", to_read, offs, err);
		return err;
	}

	if (retlen < to_read) {
		JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
			    offs, retlen, to_read);
		return -EIO;
	}

	*rdlen += to_read;
	return 0;
}

/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino. Perform a preliminary ordering on data nodes, throwing away
   those which are completely obsoleted by newer ones. The naïve approach we
   used to take, of just returning them _all_ in version order, would cause us
   to run out of memory in certain degenerate cases. */
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct jffs2_readinode_info *rii)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	unsigned char *buf = NULL;
	union jffs2_node_union *node;
	size_t retlen;
	int len, err;

	rii->mctime_ver = 0;

	dbg_readinode("ino #%u\n", f->inocache->ino);

	/* FIXME: in case of NOR and available ->point() this
	 * needs to be fixed. */
	len = sizeof(union jffs2_node_union) + c->wbuf_pagesize;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(&c->erase_completion_lock);
	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
	if (!valid_ref && f->inocache->ino != 1)
		JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
	while (valid_ref) {
		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/*
		 * At this point we don't know the type of the node we're going
		 * to read, so we do not know the size of its header. In order
		 * to minimize the amount of flash IO we assume the header is
		 * of size = JFFS2_MIN_NODE_HEADER.
		 */
		len = JFFS2_MIN_NODE_HEADER;
		if (jffs2_is_writebuffered(c)) {
			int end, rem;

			/*
			 * We are about to read JFFS2_MIN_NODE_HEADER bytes,
			 * but this flash has some minimal I/O unit. It is
			 * possible that we'll need to read more soon, so read
			 * up to the next min. I/O unit, in order not to
			 * re-read the same min. I/O unit twice.
			 */
			end = ref_offset(ref) + len;
			rem = end % c->wbuf_pagesize;
			if (rem)
				end += c->wbuf_pagesize - rem;
			len = end - ref_offset(ref);
		}

		dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)buf;

		/* No need to mask in the valid bit; it shouldn't be invalid */
		if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
			JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
				     ref_offset(ref), je16_to_cpu(node->u.magic),
				     je16_to_cpu(node->u.nodetype),
				     je32_to_cpu(node->u.totlen),
				     je32_to_cpu(node->u.hdr_crc));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}
		if (je16_to_cpu(node->u.magic) != JFFS2_MAGIC_BITMASK) {
			/* Not a JFFS2 node, whinge and move on */
			JFFS2_NOTICE("Wrong magic bitmask 0x%04x in node header at %#08x.\n",
				     je16_to_cpu(node->u.magic), ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}

		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_direntry(c, ref, &node->d, retlen, rii);
			if (unlikely(err))
				goto free_out;

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, len, rii);
			if (unlikely(err))
				goto free_out;

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

		}
	cont:
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	kfree(buf);

	f->highest_version = rii->highest_version;

	dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
		      f->inocache->ino, rii->highest_version, rii->latest_mctime,
		      rii->mctime_ver);
	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(&rii->tn_root);
	jffs2_free_full_dirent_list(rii->fds);
	rii->fds = NULL;
	kfree(buf);
	return err;
}

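/*
 * Overall shape of reading an inode: jffs2_get_inode_nodes() collects every
 * node that still refers to this inode, jffs2_build_inode_fragtree() turns
 * the surviving data nodes into the fragment tree, and then the latest inode
 * node is re-read from flash, CRC-checked and used for the mode-specific
 * fix-ups below (directory mctime, regular-file truncation to isize, symlink
 * target caching, and the single metadata node of device special files).
 */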
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001110static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111 struct jffs2_inode_info *f,
1112 struct jffs2_raw_inode *latest_node)
1113{
David Woodhousedf8e96f2007-04-25 03:23:42 +01001114 struct jffs2_readinode_info rii;
David Woodhouse61c4b232007-04-25 17:04:23 +01001115 uint32_t crc, new_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116 size_t retlen;
1117 int ret;
1118
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001119 dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001120
David Woodhousedf8e96f2007-04-25 03:23:42 +01001121 memset(&rii, 0, sizeof(rii));
1122
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123 /* Grab all nodes relevant to this ino */
David Woodhousedf8e96f2007-04-25 03:23:42 +01001124 ret = jffs2_get_inode_nodes(c, f, &rii);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125
1126 if (ret) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001127 JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 if (f->inocache->state == INO_STATE_READING)
1129 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1130 return ret;
1131 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132
David Woodhousedf8e96f2007-04-25 03:23:42 +01001133 ret = jffs2_build_inode_fragtree(c, f, &rii);
1134 if (ret) {
1135 JFFS2_ERROR("Failed to build final fragtree for inode #%u: error %d\n",
1136 f->inocache->ino, ret);
1137 if (f->inocache->state == INO_STATE_READING)
1138 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1139 jffs2_free_tmp_dnode_info_list(&rii.tn_root);
1140 /* FIXME: We could at least crc-check them all */
1141 if (rii.mdata_tn) {
1142 jffs2_free_full_dnode(rii.mdata_tn->fn);
1143 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1144 rii.mdata_tn = NULL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001145 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001146 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001148
1149 if (rii.mdata_tn) {
1150 if (rii.mdata_tn->fn->raw == rii.latest_ref) {
1151 f->metadata = rii.mdata_tn->fn;
1152 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1153 } else {
1154 jffs2_kill_tn(c, rii.mdata_tn);
1155 }
1156 rii.mdata_tn = NULL;
1157 }
1158
1159 f->dents = rii.fds;
1160
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +01001161 jffs2_dbg_fragtree_paranoia_check_nolock(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162
David Woodhousedf8e96f2007-04-25 03:23:42 +01001163 if (unlikely(!rii.latest_ref)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 /* No data nodes for this inode. */
1165 if (f->inocache->ino != 1) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001166 JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
David Woodhousedf8e96f2007-04-25 03:23:42 +01001167 if (!rii.fds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 if (f->inocache->state == INO_STATE_READING)
1169 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1170 return -EIO;
1171 }
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001172 JFFS2_NOTICE("but it has children so we fake some modes for it\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 }
1174 latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
1175 latest_node->version = cpu_to_je32(0);
1176 latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
1177 latest_node->isize = cpu_to_je32(0);
1178 latest_node->gid = cpu_to_je16(0);
1179 latest_node->uid = cpu_to_je16(0);
1180 if (f->inocache->state == INO_STATE_READING)
1181 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
1182 return 0;
1183 }
1184
David Woodhousedf8e96f2007-04-25 03:23:42 +01001185 ret = jffs2_flash_read(c, ref_offset(rii.latest_ref), sizeof(*latest_node), &retlen, (void *)latest_node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 if (ret || retlen != sizeof(*latest_node)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001187 JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
1188 ret, retlen, sizeof(*latest_node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189 /* FIXME: If this fails, there seems to be a memory leak. Find it. */
1190 up(&f->sem);
1191 jffs2_do_clear_inode(c, f);
1192 return ret?ret:-EIO;
1193 }
1194
1195 crc = crc32(0, latest_node, sizeof(*latest_node)-8);
1196 if (crc != je32_to_cpu(latest_node->node_crc)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001197 JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
David Woodhousedf8e96f2007-04-25 03:23:42 +01001198 f->inocache->ino, ref_offset(rii.latest_ref));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 up(&f->sem);
1200 jffs2_do_clear_inode(c, f);
1201 return -EIO;
1202 }
1203
1204 switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
1205 case S_IFDIR:
David Woodhousedf8e96f2007-04-25 03:23:42 +01001206 if (rii.mctime_ver > je32_to_cpu(latest_node->version)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 /* The times in the latest_node are actually older than
1208 mctime in the latest dirent. Cheat. */
David Woodhousedf8e96f2007-04-25 03:23:42 +01001209 latest_node->ctime = latest_node->mtime = cpu_to_je32(rii.latest_mctime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 }
1211 break;
1212
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001213
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214 case S_IFREG:
1215 /* If it was a regular file, truncate it to the latest node's isize */
David Woodhouse61c4b232007-04-25 17:04:23 +01001216 new_size = jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
1217 if (new_size != je32_to_cpu(latest_node->isize)) {
1218 JFFS2_WARNING("Truncating ino #%u to %d bytes failed because it only had %d bytes to start with!\n",
1219 f->inocache->ino, je32_to_cpu(latest_node->isize), new_size);
1220 latest_node->isize = cpu_to_je32(new_size);
1221 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 break;
1223
1224 case S_IFLNK:
1225 /* Hack to work around broken isize in old symlink code.
1226 Remove this when dwmw2 comes to his senses and stops
1227 symlinks from being an entirely gratuitous special
1228 case. */
1229 if (!je32_to_cpu(latest_node->isize))
1230 latest_node->isize = latest_node->dsize;
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001231
		if (f->inocache->state != INO_STATE_CHECKING) {
			/* Symlink's inode data is the target path. Read it and
			 * keep in RAM to facilitate quick follow symlink
			 * operation. */
			f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
			if (!f->target) {
				JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ENOMEM;
			}

			ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node),
					       je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);

			if (ret || retlen != je32_to_cpu(latest_node->csize)) {
				if (retlen != je32_to_cpu(latest_node->csize))
					ret = -EIO;
				kfree(f->target);
				f->target = NULL;
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				/* ret is already a negative errno here; don't negate it. */
				return ret;
			}

			f->target[je32_to_cpu(latest_node->csize)] = '\0';
			dbg_readinode("symlink's target '%s' cached\n", f->target);
		}

		/* fall through... */

	case S_IFBLK:
	case S_IFCHR:
		/* Certain inode types should have only one data node, and it's
		   kept as the metadata node */
		if (f->metadata) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		if (!frag_first(&f->fragtree)) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* ASSERT: f->fraglist != NULL */
		if (frag_next(frag_first(&f->fragtree))) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had more than one node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* OK. We're happy */
		f->metadata = frag_first(&f->fragtree)->node;
		jffs2_free_node_frag(frag_first(&f->fragtree));
		f->fragtree = RB_ROOT;
		break;
	}
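	/* All per-type checks passed: mark the inode present and wake anyone
	 * sleeping on the inocache wait queue. */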
	if (f->inocache->state == INO_STATE_READING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);

	return 0;
}

/* Scan the list of all nodes present for this ino, build map of versions, etc. */
int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			uint32_t ino, struct jffs2_raw_inode *latest_node)
{
	dbg_readinode("read inode #%u\n", ino);

 retry_inocache:
	spin_lock(&c->inocache_lock);
	f->inocache = jffs2_get_ino_cache(c, ino);

	if (f->inocache) {
		/* Check its state. We may need to wait before we can use it */
		switch (f->inocache->state) {
		case INO_STATE_UNCHECKED:
		case INO_STATE_CHECKEDABSENT:
			f->inocache->state = INO_STATE_READING;
			break;

		case INO_STATE_CHECKING:
		case INO_STATE_GC:
			/* If it's in either of these states, we need
			   to wait for whoever's got it to finish and
			   put it back. */
			dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			goto retry_inocache;

		case INO_STATE_READING:
		case INO_STATE_PRESENT:
			/* Eep. This should never happen. It can
			   happen if Linux calls read_inode() again
			   before clear_inode() has finished though. */
			JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
			/* Fail. That's probably better than allowing it to succeed */
			f->inocache = NULL;
			break;

		default:
			BUG();
		}
	}
	spin_unlock(&c->inocache_lock);

	if (!f->inocache && ino == 1) {
		/* Special case - no root inode on medium */
		f->inocache = jffs2_alloc_inode_cache();
		if (!f->inocache) {
			JFFS2_ERROR("cannot allocate inocache for root inode\n");
			return -ENOMEM;
		}
		dbg_readinode("creating inocache for root inode\n");
		memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
		f->inocache->ino = f->inocache->nlink = 1;
		f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
		f->inocache->state = INO_STATE_READING;
		jffs2_add_ino_cache(c, f->inocache);
	}
	if (!f->inocache) {
		JFFS2_ERROR("requested to read a nonexistent ino %u\n", ino);
		return -ENOENT;
	}

	return jffs2_do_read_inode_internal(c, f, latest_node);
}

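/* Read an inode purely to validate the CRCs of its nodes, using a throwaway
 * jffs2_inode_info that is cleared and freed again immediately afterwards.
 * Typically invoked for inodes in INO_STATE_CHECKING, e.g. from the
 * garbage-collection pass, to check inodes that have not yet been checked. */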
int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
	struct jffs2_raw_inode n;
	struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL);
	int ret;

	if (!f)
		return -ENOMEM;

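	/* Create f->sem already locked: jffs2_do_read_inode_internal()
	 * expects its caller to hold it. */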
	init_MUTEX_LOCKED(&f->sem);
	f->inocache = ic;

	ret = jffs2_do_read_inode_internal(c, f, &n);
	if (!ret) {
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
	}
	kfree(f);
	return ret;
}

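/* Release all in-core state attached to an inode: its ACLs and xattrs, the
 * metadata node, the fragment tree, the cached symlink target and any
 * remaining dirents. If the inode has been deleted (nlink == 0), its nodes
 * on flash are marked obsolete as well. */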
void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
{
	struct jffs2_full_dirent *fd, *fds;
	int deleted;

	jffs2_clear_acl(f);
	jffs2_xattr_delete_inode(c, f->inocache);
	down(&f->sem);
	deleted = f->inocache && !f->inocache->nlink;

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);

	if (f->metadata) {
		if (deleted)
			jffs2_mark_node_obsolete(c, f->metadata->raw);
		jffs2_free_full_dnode(f->metadata);
	}

	jffs2_kill_fragtree(&f->fragtree, deleted ? c : NULL);

	if (f->target) {
		kfree(f->target);
		f->target = NULL;
	}

	fds = f->dents;
	while (fds) {
		fd = fds;
		fds = fd->next;
		jffs2_free_full_dirent(fd);
	}

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		if (f->inocache->nodes == (void *)f->inocache)
			jffs2_del_ino_cache(c, f->inocache);
	}

	up(&f->sem);
}