blob: 6ca08ad887c09211bf98ab57cb1ec8578bd5d632 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
David Woodhousec00c3102007-04-25 14:16:47 +01004 * Copyright © 2001-2007 Red Hat, Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 *
6 * Created by David Woodhouse <dwmw2@infradead.org>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070010 */
11
12#include <linux/kernel.h>
Andrew Lunn737b7662005-07-30 16:29:30 +010013#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/slab.h>
15#include <linux/fs.h>
16#include <linux/crc32.h>
17#include <linux/pagemap.h>
18#include <linux/mtd/mtd.h>
19#include <linux/compiler.h>
20#include "nodelist.h"
21
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010022/*
David Woodhousedf8e96f2007-04-25 03:23:42 +010023 * Check the data CRC of the node.
24 *
25 * Returns: 0 if the data CRC is correct;
26 * 1 - if incorrect;
 *	    error code if an error occurred.
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010028 */
static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	struct jffs2_raw_node_ref *ref = tn->fn->raw;
	int err = 0, pointed = 0;
	struct jffs2_eraseblock *jeb;
	unsigned char *buffer;
	uint32_t crc, ofs, len;
	size_t retlen;

	/* Zero-size nodes must not reach here; their accounting is
	   adjusted as soon as the header is read, so they are never
	   left in the REF_UNCHECKED state. */
	BUG_ON(tn->csize == 0);

	/* Calculate how many bytes were already checked */
	ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
	len = tn->csize;

	if (jffs2_is_writebuffered(c)) {
		/* On writebuffered flash the start of the data, up to the
		   next wbuf page boundary, was already CRC'd into
		   tn->partial_crc when the header page was read.
		   Skip that prefix. */
		int adj = ofs % c->wbuf_pagesize;
		if (likely(adj))
			adj = c->wbuf_pagesize - adj;

		if (adj >= tn->csize) {
			/* The whole payload was in the already-read page */
			dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
				      ref_offset(ref), tn->csize, ofs);
			goto adj_acc;
		}

		ofs += adj;
		len -= adj;
	}

	dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
		      ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);

#ifndef __ECOS
	/* TODO: instead, encapsulate point() stuff into jffs2_flash_read(),
	 * adding a jffs2_flash_read_end() interface. */
	if (c->mtd->point) {
		/* Try to map the flash directly rather than copying the
		   data into a temporary buffer. */
		err = c->mtd->point(c->mtd, ofs, len, &retlen,
				    (void **)&buffer, NULL);
		if (!err && retlen < len) {
			/* A partial mapping is useless; unpoint and fall
			   back to the buffered read below. */
			JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
			c->mtd->unpoint(c->mtd, ofs, retlen);
		} else if (err)
			JFFS2_WARNING("MTD point failed: error code %d.\n", err);
		else
			pointed = 1; /* successfully pointed to device */
	}
#endif

	if (!pointed) {
		buffer = kmalloc(len, GFP_KERNEL);
		if (unlikely(!buffer))
			return -ENOMEM;

		/* TODO: this is very frequent pattern, make it a separate
		 * routine */
		err = jffs2_flash_read(c, ofs, len, &retlen, buffer);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
			goto free_out;
		}

		if (retlen != len) {
			JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
			err = -EIO;
			goto free_out;
		}
	}

	/* Continue calculating CRC */
	crc = crc32(tn->partial_crc, buffer, len);
	if(!pointed)
		kfree(buffer);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, ofs, len);
#endif

	if (crc != tn->data_crc) {
		JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
			     ref_offset(ref), tn->data_crc, crc);
		return 1;
	}

adj_acc:
	jeb = &c->blocks[ref->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, ref);
	/* If it should be REF_NORMAL, it'll get marked as such when
	   we build the fragtree, shortly. No need to worry about GC
	   moving it while it's marked REF_PRISTINE -- GC won't happen
	   till we've finished checking every inode anyway. */
	ref->flash_offset |= REF_PRISTINE;
	/*
	 * Mark the node as having been checked and fix the
	 * accounting accordingly.
	 */
	spin_lock(&c->erase_completion_lock);
	jeb->used_size += len;
	jeb->unchecked_size -= len;
	c->used_size += len;
	c->unchecked_size -= len;
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
	spin_unlock(&c->erase_completion_lock);

	return 0;

free_out:
	if(!pointed)
		kfree(buffer);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, ofs, len);
#endif
	return err;
}
144
145/*
146 * Helper function for jffs2_add_older_frag_to_fragtree().
147 *
148 * Checks the node if we are in the checking stage.
149 */
150static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
151{
152 int ret;
153
154 BUG_ON(ref_obsolete(tn->fn->raw));
155
156 /* We only check the data CRC of unchecked nodes */
157 if (ref_flags(tn->fn->raw) != REF_UNCHECKED)
158 return 0;
159
160 dbg_readinode("check node %#04x-%#04x, phys offs %#08x\n",
161 tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw));
162
163 ret = check_node_data(c, tn);
164 if (unlikely(ret < 0)) {
165 JFFS2_ERROR("check_node_data() returned error: %d.\n",
166 ret);
167 } else if (unlikely(ret > 0)) {
168 dbg_readinode("CRC error, mark it obsolete.\n");
169 jffs2_mark_node_obsolete(c, tn->fn->raw);
170 }
171
172 return ret;
173}
174
175static struct jffs2_tmp_dnode_info *jffs2_lookup_tn(struct rb_root *tn_root, uint32_t offset)
176{
177 struct rb_node *next;
178 struct jffs2_tmp_dnode_info *tn = NULL;
179
180 dbg_readinode("root %p, offset %d\n", tn_root, offset);
181
182 next = tn_root->rb_node;
183
184 while (next) {
185 tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb);
186
187 if (tn->fn->ofs < offset)
188 next = tn->rb.rb_right;
189 else if (tn->fn->ofs >= offset)
190 next = tn->rb.rb_left;
191 else
192 break;
193 }
194
195 return tn;
196}
197
198
199static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
200{
201 jffs2_mark_node_obsolete(c, tn->fn->raw);
202 jffs2_free_full_dnode(tn->fn);
203 jffs2_free_tmp_dnode_info(tn);
204}
/*
 * This function is used when we read an inode. Data nodes arrive in
 * arbitrary order -- they may be older or newer than the nodes which
 * are already in the tree. Where overlaps occur, the older node can
 * be discarded as long as the newer passes the CRC check. We don't
 * bother to keep track of holes in this rbtree, and neither do we deal
 * with frags -- we can have multiple entries starting at the same
 * offset, and the one with the smallest length will come first in the
 * ordering.
 *
 * Returns 0 if the node was handled (including marking it obsolete)
 *	   < 0 if an error occurred
 */
static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
				struct jffs2_readinode_info *rii,
				struct jffs2_tmp_dnode_info *tn)
{
	uint32_t fn_end = tn->fn->ofs + tn->fn->size;
	struct jffs2_tmp_dnode_info *this;

	dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw));

	/* If a node has zero dsize, we only have to keep it if it might be the
	   node with highest version -- i.e. the one which will end up as f->metadata.
	   Note that such nodes won't be REF_UNCHECKED since there are no data to
	   check anyway. */
	if (!tn->fn->size) {
		if (rii->mdata_tn) {
			if (rii->mdata_tn->version < tn->version) {
				/* We had a candidate mdata node already */
				dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version);
				jffs2_kill_tn(c, rii->mdata_tn);
			} else {
				/* The new candidate is older; drop it instead */
				dbg_readinode("kill new mdata with ver %d (older than existing %d\n",
					      tn->version, rii->mdata_tn->version);
				jffs2_kill_tn(c, tn);
				return 0;
			}
		}
		rii->mdata_tn = tn;
		dbg_readinode("keep new mdata with ver %d\n", tn->version);
		return 0;
	}

	/* Find the earliest node which _may_ be relevant to this one */
	this = jffs2_lookup_tn(&rii->tn_root, tn->fn->ofs);
	if (this) {
		/* If the node is coincident with another at a lower address,
		   back up until the other node is found. It may be relevant */
		while (this->overlapped)
			this = tn_prev(this);

		/* First node should never be marked overlapped */
		BUG_ON(!this);
		dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");
	}

	/* Walk forward over every existing node whose range can touch the
	   new one, resolving conflicts by version number and CRC. */
	while (this) {
		if (this->fn->ofs > fn_end)
			break;
		dbg_readinode("Ponder this ver %d, 0x%x-0x%x\n",
			      this->version, this->fn->ofs, this->fn->size);

		if (this->version == tn->version) {
			/* Version number collision means REF_PRISTINE GC. Accept either of them
			   as long as the CRC is correct. Check the one we have already... */
			if (!check_tn_node(c, this)) {
				/* The one we already had was OK. Keep it and throw away the new one */
				dbg_readinode("Like old node. Throw away new\n");
				jffs2_kill_tn(c, tn);
				return 0;
			} else {
				/* Who cares if the new one is good; keep it for now anyway. */
				dbg_readinode("Like new node. Throw away old\n");
				rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
				jffs2_kill_tn(c, this);
				/* Same overlapping from in front and behind */
				return 0;
			}
		}
		if (this->version < tn->version &&
		    this->fn->ofs >= tn->fn->ofs &&
		    this->fn->ofs + this->fn->size <= fn_end) {
			/* New node entirely overlaps 'this' */
			if (check_tn_node(c, tn)) {
				dbg_readinode("new node bad CRC\n");
				jffs2_kill_tn(c, tn);
				return 0;
			}
			/* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */
			while (this && this->fn->ofs + this->fn->size <= fn_end) {
				struct jffs2_tmp_dnode_info *next = tn_next(this);
				if (this->version < tn->version) {
					tn_erase(this, &rii->tn_root);
					dbg_readinode("Kill overlapped ver %d, 0x%x-0x%x\n",
						      this->version, this->fn->ofs,
						      this->fn->ofs+this->fn->size);
					jffs2_kill_tn(c, this);
				}
				this = next;
			}
			dbg_readinode("Done killing overlapped nodes\n");
			/* 'this' already advanced past the killed run; re-test
			   it against the loop conditions rather than stepping */
			continue;
		}
		if (this->version > tn->version &&
		    this->fn->ofs <= tn->fn->ofs &&
		    this->fn->ofs+this->fn->size >= fn_end) {
			/* New node entirely overlapped by 'this' */
			if (!check_tn_node(c, this)) {
				dbg_readinode("Good CRC on old node. Kill new\n");
				jffs2_kill_tn(c, tn);
				return 0;
			}
			/* ... but 'this' was bad. Replace it... */
			dbg_readinode("Bad CRC on old overlapping node. Kill it\n");
			tn_erase(this, &rii->tn_root);
			jffs2_kill_tn(c, this);
			break;
		}

		this = tn_next(this);
	}

	/* We neither completely obsoleted nor were completely
	   obsoleted by an earlier node. Insert into the tree */
	{
		struct rb_node *parent;
		struct rb_node **link = &rii->tn_root.rb_node;
		struct jffs2_tmp_dnode_info *insert_point = NULL;

		while (*link) {
			parent = *link;
			insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
			/* Order by offset, then by size for equal offsets, so
			   the smallest frag at a given offset comes first */
			if (tn->fn->ofs > insert_point->fn->ofs)
				link = &insert_point->rb.rb_right;
			else if (tn->fn->ofs < insert_point->fn->ofs ||
				 tn->fn->size < insert_point->fn->size)
				link = &insert_point->rb.rb_left;
			else
				link = &insert_point->rb.rb_right;
		}
		rb_link_node(&tn->rb, &insert_point->rb, link);
		rb_insert_color(&tn->rb, &rii->tn_root);
	}

	/* If there's anything behind that overlaps us, note it */
	this = tn_prev(tn);
	if (this) {
		while (1) {
			if (this->fn->ofs + this->fn->size > tn->fn->ofs) {
				dbg_readinode("Node is overlapped by %p (v %d, 0x%x-0x%x)\n",
					      this, this->version, this->fn->ofs,
					      this->fn->ofs+this->fn->size);
				tn->overlapped = 1;
				break;
			}
			if (!this->overlapped)
				break;
			this = tn_prev(this);
		}
	}

	/* If the new node overlaps anything ahead, note it */
	this = tn_next(tn);
	while (this && this->fn->ofs < fn_end) {
		this->overlapped = 1;
		dbg_readinode("Node ver %d, 0x%x-0x%x is overlapped\n",
			      this->version, this->fn->ofs,
			      this->fn->ofs+this->fn->size);
		this = tn_next(this);
	}
	return 0;
}
378
/* Trivial function to remove the last node in the tree. Which by definition
   has no right-hand child -- so can be removed just by making its only child
   (if any) take its place under its parent.  NOTE(review): this does no
   rebalancing, so it presumably leaves the red-black invariants broken --
   acceptable only because callers are draining the tree to destruction. */
static void eat_last(struct rb_root *root, struct rb_node *node)
{
	struct rb_node *parent = rb_parent(node);
	struct rb_node **link;

	/* LAST! */
	BUG_ON(node->rb_right);

	/* Find the parent's (or root's) pointer that refers to 'node' */
	if (!parent)
		link = &root->rb_node;
	else if (node == parent->rb_left)
		link = &parent->rb_left;
	else
		link = &parent->rb_right;

	/* Splice the left child (possibly NULL) into node's place */
	*link = node->rb_left;
	/* Colour doesn't matter now. Only the parent pointer. */
	if (node->rb_left)
		node->rb_left->rb_parent_color = node->rb_parent_color;
}
402
403/* We put this in reverse order, so we can just use eat_last */
404static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn)
405{
406 struct rb_node **link = &ver_root->rb_node;
407 struct rb_node *parent = NULL;
408 struct jffs2_tmp_dnode_info *this_tn;
409
410 while (*link) {
411 parent = *link;
412 this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
413
414 if (tn->version > this_tn->version)
415 link = &parent->rb_left;
416 else
417 link = &parent->rb_right;
418 }
419 dbg_readinode("Link new node at %p (root is %p)\n", link, ver_root);
420 rb_link_node(&tn->rb, parent, link);
421 rb_insert_color(&tn->rb, ver_root);
422}
423
424/* Build final, normal fragtree from tn tree. It doesn't matter which order
425 we add nodes to the real fragtree, as long as they don't overlap. And
426 having thrown away the majority of overlapped nodes as we went, there
427 really shouldn't be many sets of nodes which do overlap. If we start at
428 the end, we can use the overlap markers -- we can just eat nodes which
429 aren't overlapped, and when we encounter nodes which _do_ overlap we
430 sort them all into a temporary tree in version order before replaying them. */
static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
				      struct jffs2_inode_info *f,
				      struct jffs2_readinode_info *rii)
{
	struct jffs2_tmp_dnode_info *pen, *last, *this;
	struct rb_root ver_root = RB_ROOT;
	uint32_t high_ver = 0;

	if (rii->mdata_tn) {
		/* The metadata candidate starts as the latest known node */
		dbg_readinode("potential mdata is ver %d at %p\n", rii->mdata_tn->version, rii->mdata_tn);
		high_ver = rii->mdata_tn->version;
		rii->latest_ref = rii->mdata_tn->fn->raw;
	}
#ifdef JFFS2_DBG_READINODE_MESSAGES
	this = tn_last(&rii->tn_root);
	while (this) {
		dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs,
			      this->fn->ofs+this->fn->size, this->overlapped);
		this = tn_prev(this);
	}
#endif
	/* Drain tn_root from the end (highest offsets first), accumulating
	   each run of mutually-overlapping nodes into ver_root, which is
	   sorted by version so they can be replayed in version order. */
	pen = tn_last(&rii->tn_root);
	while ((last = pen)) {
		pen = tn_prev(last);

		eat_last(&rii->tn_root, &last->rb);
		ver_insert(&ver_root, last);

		/* Keep collecting while this node overlaps its predecessor */
		if (unlikely(last->overlapped))
			continue;

		/* Now we have a bunch of nodes in reverse version
		   order, in the tree at ver_root. Most of the time,
		   there'll actually be only one node in the 'tree',
		   in fact. */
		this = tn_last(&ver_root);

		while (this) {
			struct jffs2_tmp_dnode_info *vers_next;
			int ret;
			vers_next = tn_prev(this);
			eat_last(&ver_root, &this->rb);
			if (check_tn_node(c, this)) {
				/* Bad CRC: the node (and its space) is dropped */
				dbg_readinode("node ver %d, 0x%x-0x%x failed CRC\n",
					     this->version, this->fn->ofs,
					     this->fn->ofs+this->fn->size);
				jffs2_kill_tn(c, this);
			} else {
				if (this->version > high_ver) {
					/* Note that this is different from the other
					   highest_version, because this one is only
					   counting _valid_ nodes which could give the
					   latest inode metadata */
					high_ver = this->version;
					rii->latest_ref = this->fn->raw;
				}
				dbg_readinode("Add %p (v %d, 0x%x-0x%x, ov %d) to fragtree\n",
					     this, this->version, this->fn->ofs,
					     this->fn->ofs+this->fn->size, this->overlapped);

				ret = jffs2_add_full_dnode_to_inode(c, f, this->fn);
				if (ret) {
					/* Free the nodes in vers_root; let the caller
					   deal with the rest */
					JFFS2_ERROR("Add node to tree failed %d\n", ret);
					while (1) {
						vers_next = tn_prev(this);
						if (check_tn_node(c, this))
							jffs2_mark_node_obsolete(c, this->fn->raw);
						jffs2_free_full_dnode(this->fn);
						jffs2_free_tmp_dnode_info(this);
						this = vers_next;
						if (!this)
							break;
						eat_last(&ver_root, &vers_next->rb);
					}
					return ret;
				}
				/* The full_dnode now belongs to the fragtree;
				   only the temporary wrapper is freed here */
				jffs2_free_tmp_dnode_info(this);
			}
			this = vers_next;
		}
	}
	return 0;
}
516
/* Free every tmp_dnode_info (and its full_dnode) remaining in 'list',
 * using an iterative post-order walk that destroys the tree as it goes.
 * Used on error paths where the tree was never drained normally. */
static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
	struct rb_node *this;
	struct jffs2_tmp_dnode_info *tn;

	this = list->rb_node;

	/* Now at bottom of tree */
	while (this) {
		/* Descend to a leaf before freeing anything */
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
			jffs2_free_full_dnode(tn->fn);
			jffs2_free_tmp_dnode_info(tn);

			this = rb_parent(this);
			if (!this)
				break;

			/* Detach the freed child from its parent.  Note this
			   only compares the (now dangling) address of tn->rb;
			   the freed memory is never dereferenced. */
			if (this->rb_left == &tn->rb)
				this->rb_left = NULL;
			else if (this->rb_right == &tn->rb)
				this->rb_right = NULL;
			else BUG();
		}
	}
	/* Leave the caller with a valid empty tree */
	list->rb_node = NULL;
}
548
549static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
550{
551 struct jffs2_full_dirent *next;
552
553 while (fd) {
554 next = fd->next;
555 jffs2_free_full_dirent(fd);
556 fd = next;
557 }
558}
559
560/* Returns first valid node after 'ref'. May return 'ref' */
561static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
562{
563 while (ref && ref->next_in_ino) {
564 if (!ref_obsolete(ref))
565 return ref;
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100566 dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100567 ref = ref->next_in_ino;
568 }
569 return NULL;
570}
571
572/*
573 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
575 *
David Woodhouse14c63812007-07-03 16:51:19 -0400576 * Returns: 0 on success;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100577 * negative error code on failure.
578 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100579static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
David Woodhousedf8e96f2007-04-25 03:23:42 +0100580 struct jffs2_raw_dirent *rd, size_t read,
581 struct jffs2_readinode_info *rii)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100582{
583 struct jffs2_full_dirent *fd;
David Woodhouse1046d882006-06-18 22:44:21 +0100584 uint32_t crc;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000585
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100586 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
587 BUG_ON(ref_obsolete(ref));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000588
David Woodhouse1046d882006-06-18 22:44:21 +0100589 crc = crc32(0, rd, sizeof(*rd) - 8);
590 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
591 JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
592 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
David Woodhousedf8e96f2007-04-25 03:23:42 +0100593 jffs2_mark_node_obsolete(c, ref);
594 return 0;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100595 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000596
David Woodhouse1046d882006-06-18 22:44:21 +0100597 /* If we've never checked the CRCs on this node, check them now */
598 if (ref_flags(ref) == REF_UNCHECKED) {
599 struct jffs2_eraseblock *jeb;
600 int len;
601
602 /* Sanity check */
603 if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
604 JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
605 ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
David Woodhousedf8e96f2007-04-25 03:23:42 +0100606 jffs2_mark_node_obsolete(c, ref);
607 return 0;
David Woodhouse1046d882006-06-18 22:44:21 +0100608 }
609
610 jeb = &c->blocks[ref->flash_offset / c->sector_size];
611 len = ref_totlen(c, jeb, ref);
612
613 spin_lock(&c->erase_completion_lock);
614 jeb->used_size += len;
615 jeb->unchecked_size -= len;
616 c->used_size += len;
617 c->unchecked_size -= len;
David Woodhouse43dfa072007-06-29 13:39:57 +0100618 ref->flash_offset = ref_offset(ref) | dirent_node_state(rd);
David Woodhouse1046d882006-06-18 22:44:21 +0100619 spin_unlock(&c->erase_completion_lock);
620 }
621
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100622 fd = jffs2_alloc_full_dirent(rd->nsize + 1);
623 if (unlikely(!fd))
624 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700625
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100626 fd->raw = ref;
627 fd->version = je32_to_cpu(rd->version);
628 fd->ino = je32_to_cpu(rd->ino);
629 fd->type = rd->type;
630
David Woodhousedf8e96f2007-04-25 03:23:42 +0100631 if (fd->version > rii->highest_version)
632 rii->highest_version = fd->version;
633
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100634 /* Pick out the mctime of the latest dirent */
David Woodhousedf8e96f2007-04-25 03:23:42 +0100635 if(fd->version > rii->mctime_ver && je32_to_cpu(rd->mctime)) {
636 rii->mctime_ver = fd->version;
637 rii->latest_mctime = je32_to_cpu(rd->mctime);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100638 }
639
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000640 /*
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100641 * Copy as much of the name as possible from the raw
642 * dirent we've already read from the flash.
643 */
644 if (read > sizeof(*rd))
645 memcpy(&fd->name[0], &rd->name[0],
646 min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000647
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100648 /* Do we need to copy any more of the name directly from the flash? */
649 if (rd->nsize + sizeof(*rd) > read) {
650 /* FIXME: point() */
651 int err;
652 int already = read - sizeof(*rd);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000653
654 err = jffs2_flash_read(c, (ref_offset(ref)) + read,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100655 rd->nsize - already, &read, &fd->name[already]);
656 if (unlikely(read != rd->nsize - already) && likely(!err))
657 return -EIO;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000658
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100659 if (unlikely(err)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100660 JFFS2_ERROR("read remainder of name: error %d\n", err);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100661 jffs2_free_full_dirent(fd);
662 return -EIO;
663 }
664 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000665
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100666 fd->nhash = full_name_hash(fd->name, rd->nsize);
667 fd->next = NULL;
668 fd->name[rd->nsize] = '\0';
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000669
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100670 /*
671 * Wheee. We now have a complete jffs2_full_dirent structure, with
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000672 * the name in it and everything. Link it into the list
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100673 */
David Woodhousedf8e96f2007-04-25 03:23:42 +0100674 jffs2_add_fd_to_list(c, fd, &rii->fds);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100675
676 return 0;
677}
678
679/*
680 * Helper function for jffs2_get_inode_nodes().
681 * It is called every time an inode node is found.
682 *
David Woodhouse14c63812007-07-03 16:51:19 -0400683 * Returns: 0 on success (possibly after marking a bad node obsolete);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100684 * negative error code on failure.
685 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100686static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
David Woodhousedf8e96f2007-04-25 03:23:42 +0100687 struct jffs2_raw_inode *rd, int rdlen,
688 struct jffs2_readinode_info *rii)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100689{
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100690 struct jffs2_tmp_dnode_info *tn;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100691 uint32_t len, csize;
David Woodhouse14c63812007-07-03 16:51:19 -0400692 int ret = 0;
David Woodhouse1046d882006-06-18 22:44:21 +0100693 uint32_t crc;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000694
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100695 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
696 BUG_ON(ref_obsolete(ref));
697
David Woodhouse1046d882006-06-18 22:44:21 +0100698 crc = crc32(0, rd, sizeof(*rd) - 8);
699 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
700 JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
701 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
David Woodhousedf8e96f2007-04-25 03:23:42 +0100702 jffs2_mark_node_obsolete(c, ref);
703 return 0;
David Woodhouse1046d882006-06-18 22:44:21 +0100704 }
705
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100706 tn = jffs2_alloc_tmp_dnode_info();
707 if (!tn) {
Randy Dunlapfb6a82c2006-04-11 20:12:10 -0400708 JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100709 return -ENOMEM;
710 }
711
712 tn->partial_crc = 0;
713 csize = je32_to_cpu(rd->csize);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000714
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100715 /* If we've never checked the CRCs on this node, check them now */
716 if (ref_flags(ref) == REF_UNCHECKED) {
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000717
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100718 /* Sanity checks */
719 if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
720 unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
David Woodhouse14c63812007-07-03 16:51:19 -0400721 JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
722 jffs2_dbg_dump_node(c, ref_offset(ref));
723 jffs2_mark_node_obsolete(c, ref);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100724 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100725 }
726
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100727 if (jffs2_is_writebuffered(c) && csize != 0) {
728 /* At this point we are supposed to check the data CRC
729 * of our unchecked node. But thus far, we do not
730 * know whether the node is valid or obsolete. To
731 * figure this out, we need to walk all the nodes of
732 * the inode and build the inode fragtree. We don't
733 * want to spend time checking data of nodes which may
734 * later be found to be obsolete. So we put off the full
735 * data CRC checking until we have read all the inode
736 * nodes and have started building the fragtree.
737 *
738 * The fragtree is being built starting with nodes
739 * having the highest version number, so we'll be able
740 * to detect whether a node is valid (i.e., it is not
741 * overlapped by a node with higher version) or not.
742 * And we'll be able to check only those nodes, which
743 * are not obsolete.
744 *
745 * Of course, this optimization only makes sense in case
Robert P. J. Daye1b85132008-02-03 15:14:02 +0200746 * of NAND flashes (or other flashes with
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100747 * !jffs2_can_mark_obsolete()), since on NOR flashes
748 * nodes are marked obsolete physically.
749 *
750 * Since NAND flashes (or other flashes with
751 * jffs2_is_writebuffered(c)) are anyway read by
752 * fractions of c->wbuf_pagesize, and we have just read
753 * the node header, it is likely that the starting part
754 * of the node data is also read when we read the
755 * header. So we don't mind to check the CRC of the
756 * starting part of the data of the node now, and check
757 * the second part later (in jffs2_check_node_data()).
758 * Of course, we will not need to re-read and re-check
759 * the NAND page which we have just read. This is why we
760 * read the whole NAND page at jffs2_get_inode_nodes(),
761 * while we needed only the node header.
762 */
763 unsigned char *buf;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100764
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100765 /* 'buf' will point to the start of data */
766 buf = (unsigned char *)rd + sizeof(*rd);
767 /* len will be the read data length */
768 len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
Artem B. Bityutskiy280562b2005-08-17 15:57:43 +0100769 tn->partial_crc = crc32(0, buf, len);
770
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100771 dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100772
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100773 /* If we actually calculated the whole data CRC
774 * and it is wrong, drop the node. */
Artem B. Bityutskiy3c091332005-08-04 12:40:02 +0100775 if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
Artem B. Bityutskiy39243502005-08-03 10:26:50 +0100776 JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
777 ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
David Woodhouse14c63812007-07-03 16:51:19 -0400778 jffs2_mark_node_obsolete(c, ref);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100779 goto free_out;
Artem B. Bityutskiy39243502005-08-03 10:26:50 +0100780 }
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100781
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100782 } else if (csize == 0) {
783 /*
784 * We checked the header CRC. If the node has no data, adjust
785 * the space accounting now. For other nodes this will be done
786 * later either when the node is marked obsolete or when its
787 * data is checked.
788 */
789 struct jffs2_eraseblock *jeb;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100790
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100791 dbg_readinode("the node has no data.\n");
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100792 jeb = &c->blocks[ref->flash_offset / c->sector_size];
793 len = ref_totlen(c, jeb, ref);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100794
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100795 spin_lock(&c->erase_completion_lock);
796 jeb->used_size += len;
797 jeb->unchecked_size -= len;
798 c->used_size += len;
799 c->unchecked_size -= len;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100800 ref->flash_offset = ref_offset(ref) | REF_NORMAL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100801 spin_unlock(&c->erase_completion_lock);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100802 }
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100803 }
804
805 tn->fn = jffs2_alloc_full_dnode();
806 if (!tn->fn) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100807 JFFS2_ERROR("alloc fn failed\n");
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100808 ret = -ENOMEM;
809 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100810 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000811
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100812 tn->version = je32_to_cpu(rd->version);
813 tn->fn->ofs = je32_to_cpu(rd->offset);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100814 tn->data_crc = je32_to_cpu(rd->data_crc);
815 tn->csize = csize;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100816 tn->fn->raw = ref;
David Woodhousedf8e96f2007-04-25 03:23:42 +0100817 tn->overlapped = 0;
818
819 if (tn->version > rii->highest_version)
820 rii->highest_version = tn->version;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000821
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100822 /* There was a bug where we wrote hole nodes out with
823 csize/dsize swapped. Deal with it */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100824 if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
825 tn->fn->size = csize;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100826 else // normal case...
827 tn->fn->size = je32_to_cpu(rd->dsize);
828
David Woodhouse2c61cb22008-04-23 16:43:15 +0100829 dbg_readinode2("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
830 ref_offset(ref), je32_to_cpu(rd->version),
831 je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000832
David Woodhousedf8e96f2007-04-25 03:23:42 +0100833 ret = jffs2_add_tn_to_tree(c, rii, tn);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100834
David Woodhousedf8e96f2007-04-25 03:23:42 +0100835 if (ret) {
836 jffs2_free_full_dnode(tn->fn);
837 free_out:
838 jffs2_free_tmp_dnode_info(tn);
839 return ret;
840 }
David Woodhouse2c61cb22008-04-23 16:43:15 +0100841#ifdef JFFS2_DBG_READINODE2_MESSAGES
842 dbg_readinode2("After adding ver %d:\n", je32_to_cpu(rd->version));
David Woodhousedf8e96f2007-04-25 03:23:42 +0100843 tn = tn_first(&rii->tn_root);
844 while (tn) {
David Woodhouse2c61cb22008-04-23 16:43:15 +0100845 dbg_readinode2("%p: v %d r 0x%x-0x%x ov %d\n",
846 tn, tn->version, tn->fn->ofs,
847 tn->fn->ofs+tn->fn->size, tn->overlapped);
David Woodhousedf8e96f2007-04-25 03:23:42 +0100848 tn = tn_next(tn);
849 }
850#endif
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100851 return 0;
852}
853
854/*
855 * Helper function for jffs2_get_inode_nodes().
856 * It is called every time an unknown node is found.
857 *
David Woodhouse3877f0b2006-06-18 00:05:26 +0100858 * Returns: 0 on success;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100859 * negative error code on failure.
860 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100861static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100862{
863 /* We don't mark unknown nodes as REF_UNCHECKED */
David Woodhousec7258a42007-03-09 11:44:00 +0000864 if (ref_flags(ref) == REF_UNCHECKED) {
865 JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n",
866 ref_offset(ref));
867 JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n",
David Woodhouseef53cb02007-07-10 10:01:22 +0100868 je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
869 je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc));
David Woodhousedf8e96f2007-04-25 03:23:42 +0100870 jffs2_mark_node_obsolete(c, ref);
871 return 0;
David Woodhousec7258a42007-03-09 11:44:00 +0000872 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000873
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100874 un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
875
David Woodhouse3877f0b2006-06-18 00:05:26 +0100876 switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
877
878 case JFFS2_FEATURE_INCOMPAT:
879 JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
880 je16_to_cpu(un->nodetype), ref_offset(ref));
881 /* EEP */
882 BUG();
883 break;
884
885 case JFFS2_FEATURE_ROCOMPAT:
886 JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
887 je16_to_cpu(un->nodetype), ref_offset(ref));
888 BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
889 break;
890
891 case JFFS2_FEATURE_RWCOMPAT_COPY:
892 JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
893 je16_to_cpu(un->nodetype), ref_offset(ref));
894 break;
895
896 case JFFS2_FEATURE_RWCOMPAT_DELETE:
897 JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
898 je16_to_cpu(un->nodetype), ref_offset(ref));
David Woodhousedf8e96f2007-04-25 03:23:42 +0100899 jffs2_mark_node_obsolete(c, ref);
900 return 0;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100901 }
902
903 return 0;
904}
905
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100906/*
907 * Helper function for jffs2_get_inode_nodes().
908 * The function detects whether more data should be read and reads it if yes.
909 *
910 * Returns: 0 on succes;
911 * negative error code on failure.
912 */
913static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300914 int needed_len, int *rdlen, unsigned char *buf)
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100915{
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300916 int err, to_read = needed_len - *rdlen;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100917 size_t retlen;
918 uint32_t offs;
919
920 if (jffs2_is_writebuffered(c)) {
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300921 int rem = to_read % c->wbuf_pagesize;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100922
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300923 if (rem)
924 to_read += c->wbuf_pagesize - rem;
925 }
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100926
927 /* We need to read more data */
928 offs = ref_offset(ref) + *rdlen;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000929
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300930 dbg_readinode("read more %d bytes\n", to_read);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100931
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300932 err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100933 if (err) {
934 JFFS2_ERROR("can not read %d bytes from 0x%08x, "
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300935 "error code: %d.\n", to_read, offs, err);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100936 return err;
937 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000938
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300939 if (retlen < to_read) {
Randy Dunlapfb6a82c2006-04-11 20:12:10 -0400940 JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300941 offs, retlen, to_read);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100942 return -EIO;
943 }
944
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300945 *rdlen += to_read;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100946 return 0;
947}
948
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100949/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
David Woodhousedf8e96f2007-04-25 03:23:42 +0100950 with this ino. Perform a preliminary ordering on data nodes, throwing away
951 those which are completely obsoleted by newer ones. The naïve approach we
952 use to take of just returning them _all_ in version order will cause us to
953 run out of memory in certain degenerate cases. */
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100954static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
David Woodhousedf8e96f2007-04-25 03:23:42 +0100955 struct jffs2_readinode_info *rii)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100956{
957 struct jffs2_raw_node_ref *ref, *valid_ref;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100958 unsigned char *buf = NULL;
959 union jffs2_node_union *node;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100960 size_t retlen;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100961 int len, err;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100962
David Woodhousedf8e96f2007-04-25 03:23:42 +0100963 rii->mctime_ver = 0;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000964
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100965 dbg_readinode("ino #%u\n", f->inocache->ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100966
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100967 /* FIXME: in case of NOR and available ->point() this
968 * needs to be fixed. */
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300969 len = sizeof(union jffs2_node_union) + c->wbuf_pagesize;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100970 buf = kmalloc(len, GFP_KERNEL);
971 if (!buf)
972 return -ENOMEM;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000973
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100974 spin_lock(&c->erase_completion_lock);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100975 valid_ref = jffs2_first_valid_node(f->inocache->nodes);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100976 if (!valid_ref && f->inocache->ino != 1)
977 JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100978 while (valid_ref) {
979 /* We can hold a pointer to a non-obsolete node without the spinlock,
980 but _obsolete_ nodes may disappear at any time, if the block
981 they're in gets erased. So if we mark 'ref' obsolete while we're
982 not holding the lock, it can go away immediately. For that reason,
983 we find the next valid node first, before processing 'ref'.
984 */
985 ref = valid_ref;
986 valid_ref = jffs2_first_valid_node(ref->next_in_ino);
987 spin_unlock(&c->erase_completion_lock);
988
989 cond_resched();
990
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100991 /*
992 * At this point we don't know the type of the node we're going
993 * to read, so we do not know the size of its header. In order
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300994 * to minimize the amount of flash IO we assume the header is
995 * of size = JFFS2_MIN_NODE_HEADER.
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100996 */
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300997 len = JFFS2_MIN_NODE_HEADER;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100998 if (jffs2_is_writebuffered(c)) {
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300999 int end, rem;
1000
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001001 /*
Artem Bityutskiy10731f82007-04-04 13:59:11 +03001002 * We are about to read JFFS2_MIN_NODE_HEADER bytes,
1003 * but this flash has some minimal I/O unit. It is
1004 * possible that we'll need to read more soon, so read
1005 * up to the next min. I/O unit, in order not to
1006 * re-read the same min. I/O unit twice.
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001007 */
Artem Bityutskiy10731f82007-04-04 13:59:11 +03001008 end = ref_offset(ref) + len;
1009 rem = end % c->wbuf_pagesize;
1010 if (rem)
1011 end += c->wbuf_pagesize - rem;
1012 len = end - ref_offset(ref);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001013 }
1014
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001015 dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001016
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001017 /* FIXME: point() */
Artem Bityutskiy10731f82007-04-04 13:59:11 +03001018 err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001019 if (err) {
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001020 JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001021 goto free_out;
1022 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001023
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001024 if (retlen < len) {
Randy Dunlapfb6a82c2006-04-11 20:12:10 -04001025 JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001026 err = -EIO;
1027 goto free_out;
1028 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001029
Artem Bityutskiy10731f82007-04-04 13:59:11 +03001030 node = (union jffs2_node_union *)buf;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001031
David Woodhouse3877f0b2006-06-18 00:05:26 +01001032 /* No need to mask in the valid bit; it shouldn't be invalid */
1033 if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
1034 JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
1035 ref_offset(ref), je16_to_cpu(node->u.magic),
1036 je16_to_cpu(node->u.nodetype),
1037 je32_to_cpu(node->u.totlen),
1038 je32_to_cpu(node->u.hdr_crc));
1039 jffs2_dbg_dump_node(c, ref_offset(ref));
1040 jffs2_mark_node_obsolete(c, ref);
1041 goto cont;
1042 }
Joakim Tjernlund0dec4c82007-03-10 17:08:44 +01001043 if (je16_to_cpu(node->u.magic) != JFFS2_MAGIC_BITMASK) {
1044 /* Not a JFFS2 node, whinge and move on */
1045 JFFS2_NOTICE("Wrong magic bitmask 0x%04x in node header at %#08x.\n",
1046 je16_to_cpu(node->u.magic), ref_offset(ref));
David Woodhousec7258a42007-03-09 11:44:00 +00001047 jffs2_mark_node_obsolete(c, ref);
1048 goto cont;
1049 }
David Woodhouse3877f0b2006-06-18 00:05:26 +01001050
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001051 switch (je16_to_cpu(node->u.nodetype)) {
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001052
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001053 case JFFS2_NODETYPE_DIRENT:
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001054
Artem Bityutskiyea55d302007-05-30 12:08:14 +03001055 if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent) &&
1056 len < sizeof(struct jffs2_raw_dirent)) {
Artem Bityutskiy10731f82007-04-04 13:59:11 +03001057 err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001058 if (unlikely(err))
1059 goto free_out;
1060 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001061
David Woodhousedf8e96f2007-04-25 03:23:42 +01001062 err = read_direntry(c, ref, &node->d, retlen, rii);
1063 if (unlikely(err))
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001064 goto free_out;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001065
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001066 break;
1067
1068 case JFFS2_NODETYPE_INODE:
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001069
Artem Bityutskiyea55d302007-05-30 12:08:14 +03001070 if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode) &&
1071 len < sizeof(struct jffs2_raw_inode)) {
Artem Bityutskiy10731f82007-04-04 13:59:11 +03001072 err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001073 if (unlikely(err))
1074 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001075 }
1076
David Woodhousedf8e96f2007-04-25 03:23:42 +01001077 err = read_dnode(c, ref, &node->i, len, rii);
1078 if (unlikely(err))
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001079 goto free_out;
1080
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 break;
1082
1083 default:
Artem Bityutskiyea55d302007-05-30 12:08:14 +03001084 if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node) &&
1085 len < sizeof(struct jffs2_unknown_node)) {
Artem Bityutskiy10731f82007-04-04 13:59:11 +03001086 err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001087 if (unlikely(err))
1088 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001089 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001090
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001091 err = read_unknown(c, ref, &node->u);
David Woodhouse14c63812007-07-03 16:51:19 -04001092 if (unlikely(err))
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001093 goto free_out;
1094
Linus Torvalds1da177e2005-04-16 15:20:36 -07001095 }
David Woodhouse3877f0b2006-06-18 00:05:26 +01001096 cont:
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001097 spin_lock(&c->erase_completion_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098 }
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001099
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001100 spin_unlock(&c->erase_completion_lock);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001101 kfree(buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001102
David Woodhousedf8e96f2007-04-25 03:23:42 +01001103 f->highest_version = rii->highest_version;
1104
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001105 dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
David Woodhousedf8e96f2007-04-25 03:23:42 +01001106 f->inocache->ino, rii->highest_version, rii->latest_mctime,
1107 rii->mctime_ver);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001108 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001110 free_out:
David Woodhousedf8e96f2007-04-25 03:23:42 +01001111 jffs2_free_tmp_dnode_info_list(&rii->tn_root);
1112 jffs2_free_full_dirent_list(rii->fds);
1113 rii->fds = NULL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001114 kfree(buf);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001115 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116}
1117
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001118static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119 struct jffs2_inode_info *f,
1120 struct jffs2_raw_inode *latest_node)
1121{
David Woodhousedf8e96f2007-04-25 03:23:42 +01001122 struct jffs2_readinode_info rii;
David Woodhouse61c4b232007-04-25 17:04:23 +01001123 uint32_t crc, new_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124 size_t retlen;
1125 int ret;
1126
David Woodhouse27c72b02008-05-01 18:47:17 +01001127 dbg_readinode("ino #%u pino/nlink is %d\n", f->inocache->ino,
1128 f->inocache->pino_nlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129
David Woodhousedf8e96f2007-04-25 03:23:42 +01001130 memset(&rii, 0, sizeof(rii));
1131
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132 /* Grab all nodes relevant to this ino */
David Woodhousedf8e96f2007-04-25 03:23:42 +01001133 ret = jffs2_get_inode_nodes(c, f, &rii);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134
1135 if (ret) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001136 JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137 if (f->inocache->state == INO_STATE_READING)
1138 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1139 return ret;
1140 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141
David Woodhousedf8e96f2007-04-25 03:23:42 +01001142 ret = jffs2_build_inode_fragtree(c, f, &rii);
1143 if (ret) {
1144 JFFS2_ERROR("Failed to build final fragtree for inode #%u: error %d\n",
1145 f->inocache->ino, ret);
1146 if (f->inocache->state == INO_STATE_READING)
1147 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1148 jffs2_free_tmp_dnode_info_list(&rii.tn_root);
1149 /* FIXME: We could at least crc-check them all */
1150 if (rii.mdata_tn) {
1151 jffs2_free_full_dnode(rii.mdata_tn->fn);
1152 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1153 rii.mdata_tn = NULL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001154 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001155 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001157
1158 if (rii.mdata_tn) {
1159 if (rii.mdata_tn->fn->raw == rii.latest_ref) {
1160 f->metadata = rii.mdata_tn->fn;
1161 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1162 } else {
1163 jffs2_kill_tn(c, rii.mdata_tn);
1164 }
1165 rii.mdata_tn = NULL;
1166 }
1167
1168 f->dents = rii.fds;
1169
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +01001170 jffs2_dbg_fragtree_paranoia_check_nolock(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171
David Woodhousedf8e96f2007-04-25 03:23:42 +01001172 if (unlikely(!rii.latest_ref)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 /* No data nodes for this inode. */
1174 if (f->inocache->ino != 1) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001175 JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
David Woodhousedf8e96f2007-04-25 03:23:42 +01001176 if (!rii.fds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177 if (f->inocache->state == INO_STATE_READING)
1178 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1179 return -EIO;
1180 }
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001181 JFFS2_NOTICE("but it has children so we fake some modes for it\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182 }
1183 latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
1184 latest_node->version = cpu_to_je32(0);
1185 latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
1186 latest_node->isize = cpu_to_je32(0);
1187 latest_node->gid = cpu_to_je16(0);
1188 latest_node->uid = cpu_to_je16(0);
1189 if (f->inocache->state == INO_STATE_READING)
1190 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
1191 return 0;
1192 }
1193
David Woodhousedf8e96f2007-04-25 03:23:42 +01001194 ret = jffs2_flash_read(c, ref_offset(rii.latest_ref), sizeof(*latest_node), &retlen, (void *)latest_node);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 if (ret || retlen != sizeof(*latest_node)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001196 JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
1197 ret, retlen, sizeof(*latest_node));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198 /* FIXME: If this fails, there seems to be a memory leak. Find it. */
David Woodhouseced22072008-04-22 15:13:40 +01001199 mutex_unlock(&f->sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 jffs2_do_clear_inode(c, f);
1201 return ret?ret:-EIO;
1202 }
1203
1204 crc = crc32(0, latest_node, sizeof(*latest_node)-8);
1205 if (crc != je32_to_cpu(latest_node->node_crc)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001206 JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
David Woodhousedf8e96f2007-04-25 03:23:42 +01001207 f->inocache->ino, ref_offset(rii.latest_ref));
David Woodhouseced22072008-04-22 15:13:40 +01001208 mutex_unlock(&f->sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209 jffs2_do_clear_inode(c, f);
1210 return -EIO;
1211 }
1212
1213 switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
1214 case S_IFDIR:
David Woodhousedf8e96f2007-04-25 03:23:42 +01001215 if (rii.mctime_ver > je32_to_cpu(latest_node->version)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 /* The times in the latest_node are actually older than
1217 mctime in the latest dirent. Cheat. */
David Woodhousedf8e96f2007-04-25 03:23:42 +01001218 latest_node->ctime = latest_node->mtime = cpu_to_je32(rii.latest_mctime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 }
1220 break;
1221
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001222
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 case S_IFREG:
1224 /* If it was a regular file, truncate it to the latest node's isize */
David Woodhouse61c4b232007-04-25 17:04:23 +01001225 new_size = jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
1226 if (new_size != je32_to_cpu(latest_node->isize)) {
1227 JFFS2_WARNING("Truncating ino #%u to %d bytes failed because it only had %d bytes to start with!\n",
1228 f->inocache->ino, je32_to_cpu(latest_node->isize), new_size);
1229 latest_node->isize = cpu_to_je32(new_size);
1230 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231 break;
1232
1233 case S_IFLNK:
1234 /* Hack to work around broken isize in old symlink code.
1235 Remove this when dwmw2 comes to his senses and stops
1236 symlinks from being an entirely gratuitous special
1237 case. */
1238 if (!je32_to_cpu(latest_node->isize))
1239 latest_node->isize = latest_node->dsize;
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001240
1241 if (f->inocache->state != INO_STATE_CHECKING) {
1242 /* Symlink's inode data is the target path. Read it and
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001243 * keep in RAM to facilitate quick follow symlink
1244 * operation. */
1245 f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
1246 if (!f->target) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001247 JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
David Woodhouseced22072008-04-22 15:13:40 +01001248 mutex_unlock(&f->sem);
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001249 jffs2_do_clear_inode(c, f);
1250 return -ENOMEM;
1251 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001252
David Woodhousedf8e96f2007-04-25 03:23:42 +01001253 ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node),
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001254 je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001255
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001256 if (ret || retlen != je32_to_cpu(latest_node->csize)) {
1257 if (retlen != je32_to_cpu(latest_node->csize))
1258 ret = -EIO;
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001259 kfree(f->target);
1260 f->target = NULL;
David Woodhouseced22072008-04-22 15:13:40 +01001261 mutex_unlock(&f->sem);
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001262 jffs2_do_clear_inode(c, f);
1263 return -ret;
1264 }
1265
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001266 f->target[je32_to_cpu(latest_node->csize)] = '\0';
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001267 dbg_readinode("symlink's target '%s' cached\n", f->target);
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +00001268 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001269
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 /* fall through... */
1271
1272 case S_IFBLK:
1273 case S_IFCHR:
1274 /* Certain inode types should have only one data node, and it's
1275 kept as the metadata node */
1276 if (f->metadata) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001277 JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 f->inocache->ino, jemode_to_cpu(latest_node->mode));
David Woodhouseced22072008-04-22 15:13:40 +01001279 mutex_unlock(&f->sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 jffs2_do_clear_inode(c, f);
1281 return -EIO;
1282 }
1283 if (!frag_first(&f->fragtree)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001284 JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 f->inocache->ino, jemode_to_cpu(latest_node->mode));
David Woodhouseced22072008-04-22 15:13:40 +01001286 mutex_unlock(&f->sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287 jffs2_do_clear_inode(c, f);
1288 return -EIO;
1289 }
1290 /* ASSERT: f->fraglist != NULL */
1291 if (frag_next(frag_first(&f->fragtree))) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001292 JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 f->inocache->ino, jemode_to_cpu(latest_node->mode));
1294 /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
David Woodhouseced22072008-04-22 15:13:40 +01001295 mutex_unlock(&f->sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 jffs2_do_clear_inode(c, f);
1297 return -EIO;
1298 }
1299 /* OK. We're happy */
1300 f->metadata = frag_first(&f->fragtree)->node;
1301 jffs2_free_node_frag(frag_first(&f->fragtree));
1302 f->fragtree = RB_ROOT;
1303 break;
1304 }
1305 if (f->inocache->state == INO_STATE_READING)
1306 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
1307
1308 return 0;
1309}
1310
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001311/* Scan the list of all nodes present for this ino, build map of versions, etc. */
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001312int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001313 uint32_t ino, struct jffs2_raw_inode *latest_node)
1314{
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001315 dbg_readinode("read inode #%u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001316
1317 retry_inocache:
1318 spin_lock(&c->inocache_lock);
1319 f->inocache = jffs2_get_ino_cache(c, ino);
1320
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001321 if (f->inocache) {
1322 /* Check its state. We may need to wait before we can use it */
1323 switch(f->inocache->state) {
1324 case INO_STATE_UNCHECKED:
1325 case INO_STATE_CHECKEDABSENT:
1326 f->inocache->state = INO_STATE_READING;
1327 break;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001328
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001329 case INO_STATE_CHECKING:
1330 case INO_STATE_GC:
1331 /* If it's in either of these states, we need
1332 to wait for whoever's got it to finish and
1333 put it back. */
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001334 dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001335 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
1336 goto retry_inocache;
1337
1338 case INO_STATE_READING:
1339 case INO_STATE_PRESENT:
1340 /* Eep. This should never happen. It can
1341 happen if Linux calls read_inode() again
1342 before clear_inode() has finished though. */
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001343 JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001344 /* Fail. That's probably better than allowing it to succeed */
1345 f->inocache = NULL;
1346 break;
1347
1348 default:
1349 BUG();
1350 }
1351 }
1352 spin_unlock(&c->inocache_lock);
1353
1354 if (!f->inocache && ino == 1) {
1355 /* Special case - no root inode on medium */
1356 f->inocache = jffs2_alloc_inode_cache();
1357 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001358 JFFS2_ERROR("cannot allocate inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001359 return -ENOMEM;
1360 }
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +01001361 dbg_readinode("creating inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001362 memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
David Woodhouse27c72b02008-05-01 18:47:17 +01001363 f->inocache->ino = f->inocache->pino_nlink = 1;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001364 f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
1365 f->inocache->state = INO_STATE_READING;
1366 jffs2_add_ino_cache(c, f->inocache);
1367 }
1368 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001369 JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001370 return -ENOENT;
1371 }
1372
1373 return jffs2_do_read_inode_internal(c, f, latest_node);
1374}
1375
1376int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
1377{
1378 struct jffs2_raw_inode n;
Yan Burman3d375d92006-12-04 15:03:01 -08001379 struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001380 int ret;
1381
1382 if (!f)
1383 return -ENOMEM;
1384
David Woodhouseced22072008-04-22 15:13:40 +01001385 mutex_init(&f->sem);
1386 mutex_lock(&f->sem);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001387 f->inocache = ic;
1388
1389 ret = jffs2_do_read_inode_internal(c, f, &n);
1390 if (!ret) {
David Woodhouseced22072008-04-22 15:13:40 +01001391 mutex_unlock(&f->sem);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001392 jffs2_do_clear_inode(c, f);
1393 }
1394 kfree (f);
1395 return ret;
1396}
1397
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
1399{
1400 struct jffs2_full_dirent *fd, *fds;
1401 int deleted;
1402
KaiGai Koheic7afb0f2006-07-02 15:13:46 +01001403 jffs2_clear_acl(f);
KaiGai Kohei355ed4e2006-06-24 09:15:36 +09001404 jffs2_xattr_delete_inode(c, f->inocache);
David Woodhouseced22072008-04-22 15:13:40 +01001405 mutex_lock(&f->sem);
David Woodhouse27c72b02008-05-01 18:47:17 +01001406 deleted = f->inocache && !f->inocache->pino_nlink;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407
David Woodhouse67e345d2005-02-27 23:01:36 +00001408 if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
1409 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);
1410
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 if (f->metadata) {
1412 if (deleted)
1413 jffs2_mark_node_obsolete(c, f->metadata->raw);
1414 jffs2_free_full_dnode(f->metadata);
1415 }
1416
1417 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
1418
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001419 if (f->target) {
1420 kfree(f->target);
1421 f->target = NULL;
1422 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001423
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001424 fds = f->dents;
1425 while(fds) {
1426 fd = fds;
1427 fds = fd->next;
1428 jffs2_free_full_dirent(fd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 }
1430
David Woodhouse67e345d2005-02-27 23:01:36 +00001431 if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
David Woodhouse67e345d2005-02-27 23:01:36 +00001433 if (f->inocache->nodes == (void *)f->inocache)
1434 jffs2_del_ino_cache(c, f->inocache);
1435 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436
David Woodhouseced22072008-04-22 15:13:40 +01001437 mutex_unlock(&f->sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438}