/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/crc32.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include "nodelist.h"

/*
 * Check the data CRC of the node.
 *
 * Returns: 0 if the data CRC is correct;
 *	    1 if it is incorrect;
 *	    a negative error code if an error occurred.
 */
static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	struct jffs2_raw_node_ref *ref = tn->fn->raw;
	int err = 0, pointed = 0;
	struct jffs2_eraseblock *jeb;
	unsigned char *buffer;
	uint32_t crc, ofs, len;
	size_t retlen;

	BUG_ON(tn->csize == 0);

	/* Calculate how many bytes were already checked */
	ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
	len = tn->csize;

	if (jffs2_is_writebuffered(c)) {
		int adj = ofs % c->wbuf_pagesize;
		if (likely(adj))
			adj = c->wbuf_pagesize - adj;

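		/*
		 * Worked example (illustrative values only, assuming a
		 * wbuf_pagesize of 512): if the data starts at ofs 4660,
		 * then adj = 512 - (4660 % 512) = 460.  Those first 460
		 * data bytes share a min. I/O unit with the node header,
		 * so they were already read and folded into
		 * tn->partial_crc when the header was read; only the
		 * bytes past that boundary still need checking here.
		 */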
		if (adj >= tn->csize) {
			dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
				      ref_offset(ref), tn->csize, ofs);
			goto adj_acc;
		}

		ofs += adj;
		len -= adj;
	}

	dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
		      ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);

#ifndef __ECOS
	/* TODO: instead, encapsulate the point() stuff in jffs2_flash_read(),
	 * adding a jffs2_flash_read_end() interface. */
	err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL);
	if (!err && retlen < len) {
		JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, len);
		mtd_unpoint(c->mtd, ofs, retlen);
	} else if (err) {
		if (err != -EOPNOTSUPP)
			JFFS2_WARNING("MTD point failed: error code %d.\n", err);
	} else
		pointed = 1; /* successfully pointed to device */
#endif

	if (!pointed) {
		buffer = kmalloc(len, GFP_KERNEL);
		if (unlikely(!buffer))
			return -ENOMEM;

		/* TODO: this is a very frequent pattern, make it a separate
		 * routine */
		err = jffs2_flash_read(c, ofs, len, &retlen, buffer);
		if (err) {
			JFFS2_ERROR("cannot read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
			goto free_out;
		}

		if (retlen != len) {
			JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
			err = -EIO;
			goto free_out;
		}
	}

	/* Continue calculating CRC */
	crc = crc32(tn->partial_crc, buffer, len);
	if (!pointed)
		kfree(buffer);
#ifndef __ECOS
	else
		mtd_unpoint(c->mtd, ofs, len);
#endif

	if (crc != tn->data_crc) {
		JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
			     ref_offset(ref), tn->data_crc, crc);
		return 1;
	}

adj_acc:
	jeb = &c->blocks[ref->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, ref);
	/* If it should be REF_NORMAL, it'll get marked as such when
	   we build the fragtree, shortly. No need to worry about GC
	   moving it while it's marked REF_PRISTINE -- GC won't happen
	   till we've finished checking every inode anyway. */
	ref->flash_offset |= REF_PRISTINE;
	/*
	 * Mark the node as having been checked and fix the
	 * accounting accordingly.
	 */
	spin_lock(&c->erase_completion_lock);
	jeb->used_size += len;
	jeb->unchecked_size -= len;
	c->used_size += len;
	c->unchecked_size -= len;
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
	spin_unlock(&c->erase_completion_lock);

	return 0;

free_out:
	if (!pointed)
		kfree(buffer);
#ifndef __ECOS
	else
		mtd_unpoint(c->mtd, ofs, len);
#endif
	return err;
}

/*
 * Helper function for jffs2_add_tn_to_tree() and
 * jffs2_build_inode_fragtree().
 *
 * Checks the node's data CRC if we are in the checking stage.
 */
static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	int ret;

	BUG_ON(ref_obsolete(tn->fn->raw));

	/* We only check the data CRC of unchecked nodes */
	if (ref_flags(tn->fn->raw) != REF_UNCHECKED)
		return 0;

	dbg_readinode("check node %#04x-%#04x, phys offs %#08x\n",
		      tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw));

	ret = check_node_data(c, tn);
	if (unlikely(ret < 0)) {
		JFFS2_ERROR("check_node_data() returned error: %d.\n",
			    ret);
	} else if (unlikely(ret > 0)) {
		dbg_readinode("CRC error, mark it obsolete.\n");
		jffs2_mark_node_obsolete(c, tn->fn->raw);
	}

	return ret;
}

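/*
 * Find a starting point in the tn tree for nodes which may be relevant
 * to the given offset.  Note that this is not an exact-match lookup: it
 * walks down the tree comparing each node's fn->ofs against 'offset'
 * and returns the last node visited, which callers then step backwards
 * or forwards from.
 */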
static struct jffs2_tmp_dnode_info *jffs2_lookup_tn(struct rb_root *tn_root, uint32_t offset)
{
	struct rb_node *next;
	struct jffs2_tmp_dnode_info *tn = NULL;

	dbg_readinode("root %p, offset %d\n", tn_root, offset);

	next = tn_root->rb_node;

	while (next) {
		tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb);

		if (tn->fn->ofs < offset)
			next = tn->rb.rb_right;
		else if (tn->fn->ofs >= offset)
			next = tn->rb.rb_left;
		else
			break;
	}

	return tn;
}


static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	jffs2_mark_node_obsolete(c, tn->fn->raw);
	jffs2_free_full_dnode(tn->fn);
	jffs2_free_tmp_dnode_info(tn);
}
/*
 * This function is used when we read an inode. Data nodes arrive in
 * arbitrary order -- they may be older or newer than the nodes which
 * are already in the tree. Where overlaps occur, the older node can
 * be discarded as long as the newer passes the CRC check. We don't
 * bother to keep track of holes in this rbtree, and neither do we deal
 * with frags -- we can have multiple entries starting at the same
 * offset, and the one with the smallest length will come first in the
 * ordering.
 *
 * Returns: 0 if the node was handled (including marking it obsolete);
 *	    < 0 if an error occurred.
 */
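/*
 * Worked example (values are illustrative only): suppose the tree holds
 * (ofs 0x000, len 0x400, v5) and (ofs 0x600, len 0x400, v7), and a node
 * (ofs 0x000, len 0x800, v9) arrives with a good data CRC.  The v5 node
 * lies entirely within the newcomer's range and is older, so it is
 * killed outright; the v7 node extends past 0x800, so it is merely
 * marked 'overlapped' and the conflict is resolved later, when
 * jffs2_build_inode_fragtree() replays overlapping nodes in version
 * order.
 */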
static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c,
				struct jffs2_readinode_info *rii,
				struct jffs2_tmp_dnode_info *tn)
{
	uint32_t fn_end = tn->fn->ofs + tn->fn->size;
	struct jffs2_tmp_dnode_info *this, *ptn;

	dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw));

	/* If a node has zero dsize, we only have to keep it if it might be the
	   node with the highest version -- i.e. the one which will end up as
	   f->metadata. Note that such nodes won't be REF_UNCHECKED since there
	   is no data to check anyway. */
	if (!tn->fn->size) {
		if (rii->mdata_tn) {
			if (rii->mdata_tn->version < tn->version) {
				/* We had a candidate mdata node already */
				dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version);
				jffs2_kill_tn(c, rii->mdata_tn);
			} else {
				dbg_readinode("kill new mdata with ver %d (older than existing %d)\n",
					      tn->version, rii->mdata_tn->version);
				jffs2_kill_tn(c, tn);
				return 0;
			}
		}
		rii->mdata_tn = tn;
		dbg_readinode("keep new mdata with ver %d\n", tn->version);
		return 0;
	}

	/* Find the earliest node which _may_ be relevant to this one */
	this = jffs2_lookup_tn(&rii->tn_root, tn->fn->ofs);
	if (this) {
		/* If the node is coincident with another at a lower address,
		   back up until the other node is found. It may be relevant */
		while (this->overlapped) {
			ptn = tn_prev(this);
			if (!ptn) {
				/*
				 * We killed a node which set the overlapped
				 * flags during the scan. Fix it up.
				 */
				this->overlapped = 0;
				break;
			}
			this = ptn;
		}
		dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole");
	}

	while (this) {
		if (this->fn->ofs > fn_end)
			break;
		dbg_readinode("Ponder this ver %d, 0x%x-0x%x\n",
			      this->version, this->fn->ofs,
			      this->fn->ofs + this->fn->size);

		if (this->version == tn->version) {
			/* Version number collision means REF_PRISTINE GC. Accept either of them
			   as long as the CRC is correct. Check the one we have already... */
			if (!check_tn_node(c, this)) {
				/* The one we already had was OK. Keep it and throw away the new one */
				dbg_readinode("Like old node. Throw away new\n");
				jffs2_kill_tn(c, tn);
				return 0;
			} else {
				/* Who cares if the new one is good; keep it for now anyway. */
				dbg_readinode("Like new node. Throw away old\n");
				rb_replace_node(&this->rb, &tn->rb, &rii->tn_root);
				jffs2_kill_tn(c, this);
				/* The replacement inherits the same overlaps, in front and behind */
				return 0;
			}
		}
		if (this->version < tn->version &&
		    this->fn->ofs >= tn->fn->ofs &&
		    this->fn->ofs + this->fn->size <= fn_end) {
			/* New node entirely overlaps 'this' */
			if (check_tn_node(c, tn)) {
				dbg_readinode("new node bad CRC\n");
				jffs2_kill_tn(c, tn);
				return 0;
			}
			/* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */
			while (this && this->fn->ofs + this->fn->size <= fn_end) {
				struct jffs2_tmp_dnode_info *next = tn_next(this);
				if (this->version < tn->version) {
					tn_erase(this, &rii->tn_root);
					dbg_readinode("Kill overlapped ver %d, 0x%x-0x%x\n",
						      this->version, this->fn->ofs,
						      this->fn->ofs+this->fn->size);
					jffs2_kill_tn(c, this);
				}
				this = next;
			}
			dbg_readinode("Done killing overlapped nodes\n");
			continue;
		}
		if (this->version > tn->version &&
		    this->fn->ofs <= tn->fn->ofs &&
		    this->fn->ofs+this->fn->size >= fn_end) {
			/* New node entirely overlapped by 'this' */
			if (!check_tn_node(c, this)) {
				dbg_readinode("Good CRC on old node. Kill new\n");
				jffs2_kill_tn(c, tn);
				return 0;
			}
			/* ... but 'this' was bad. Replace it... */
			dbg_readinode("Bad CRC on old overlapping node. Kill it\n");
			tn_erase(this, &rii->tn_root);
			jffs2_kill_tn(c, this);
			break;
		}

		this = tn_next(this);
	}

	/* We neither completely obsoleted nor were completely
	   obsoleted by an earlier node. Insert into the tree */
	{
		struct rb_node *parent;
		struct rb_node **link = &rii->tn_root.rb_node;
		struct jffs2_tmp_dnode_info *insert_point = NULL;

		while (*link) {
			parent = *link;
			insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
			if (tn->fn->ofs > insert_point->fn->ofs)
				link = &insert_point->rb.rb_right;
			else if (tn->fn->ofs < insert_point->fn->ofs ||
				 tn->fn->size < insert_point->fn->size)
				link = &insert_point->rb.rb_left;
			else
				link = &insert_point->rb.rb_right;
		}
		rb_link_node(&tn->rb, &insert_point->rb, link);
		rb_insert_color(&tn->rb, &rii->tn_root);
	}

	/* If there's anything behind that overlaps us, note it */
	this = tn_prev(tn);
	if (this) {
		while (1) {
			if (this->fn->ofs + this->fn->size > tn->fn->ofs) {
				dbg_readinode("Node is overlapped by %p (v %d, 0x%x-0x%x)\n",
					      this, this->version, this->fn->ofs,
					      this->fn->ofs+this->fn->size);
				tn->overlapped = 1;
				break;
			}
			if (!this->overlapped)
				break;

			ptn = tn_prev(this);
			if (!ptn) {
				/*
				 * We killed a node which set the overlapped
				 * flags during the scan. Fix it up.
				 */
				this->overlapped = 0;
				break;
			}
			this = ptn;
		}
	}

	/* If the new node overlaps anything ahead, note it */
	this = tn_next(tn);
	while (this && this->fn->ofs < fn_end) {
		this->overlapped = 1;
		dbg_readinode("Node ver %d, 0x%x-0x%x is overlapped\n",
			      this->version, this->fn->ofs,
			      this->fn->ofs+this->fn->size);
		this = tn_next(this);
	}
	return 0;
}

/* Trivial function to remove the last node in the tree, which by definition
   has no right-hand child -- so it can be removed just by making its only
   child (if any) take its place under its parent. */
static void eat_last(struct rb_root *root, struct rb_node *node)
{
	struct rb_node *parent = rb_parent(node);
	struct rb_node **link;

	/* LAST! */
	BUG_ON(node->rb_right);

	if (!parent)
		link = &root->rb_node;
	else if (node == parent->rb_left)
		link = &parent->rb_left;
	else
		link = &parent->rb_right;

	*link = node->rb_left;
	/* Colour doesn't matter now. Only the parent pointer. */
	if (node->rb_left)
		node->rb_left->rb_parent_color = node->rb_parent_color;
}

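/*
 * Note the inverted comparison below: higher versions sort to the
 * *left*, so the right-most node is always the lowest version still in
 * the tree.  eat_last() therefore pops nodes in ascending version
 * order, and replaying them into the fragtree lets newer nodes
 * overwrite the data of older ones.
 */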
/* We put this in reverse order, so we can just use eat_last */
static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn)
{
	struct rb_node **link = &ver_root->rb_node;
	struct rb_node *parent = NULL;
	struct jffs2_tmp_dnode_info *this_tn;

	while (*link) {
		parent = *link;
		this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);

		if (tn->version > this_tn->version)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	dbg_readinode("Link new node at %p (root is %p)\n", link, ver_root);
	rb_link_node(&tn->rb, parent, link);
	rb_insert_color(&tn->rb, ver_root);
}

/* Build the final, normal fragtree from the tn tree. It doesn't matter in
   which order we add nodes to the real fragtree, as long as they don't
   overlap. And having thrown away the majority of overlapped nodes as we
   went, there really shouldn't be many sets of nodes which do overlap. If we
   start at the end, we can use the overlap markers -- we can just eat nodes
   which aren't overlapped, and when we encounter nodes which _do_ overlap we
   sort them all into a temporary tree in version order before replaying them. */
static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
				      struct jffs2_inode_info *f,
				      struct jffs2_readinode_info *rii)
{
	struct jffs2_tmp_dnode_info *pen, *last, *this;
	struct rb_root ver_root = RB_ROOT;
	uint32_t high_ver = 0;

	if (rii->mdata_tn) {
		dbg_readinode("potential mdata is ver %d at %p\n", rii->mdata_tn->version, rii->mdata_tn);
		high_ver = rii->mdata_tn->version;
		rii->latest_ref = rii->mdata_tn->fn->raw;
	}
#ifdef JFFS2_DBG_READINODE_MESSAGES
	this = tn_last(&rii->tn_root);
	while (this) {
		dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs,
			      this->fn->ofs+this->fn->size, this->overlapped);
		this = tn_prev(this);
	}
#endif
	pen = tn_last(&rii->tn_root);
	while ((last = pen)) {
		pen = tn_prev(last);

		eat_last(&rii->tn_root, &last->rb);
		ver_insert(&ver_root, last);

		if (unlikely(last->overlapped)) {
			if (pen)
				continue;
			/*
			 * We killed a node which set the overlapped
			 * flags during the scan. Fix it up.
			 */
			last->overlapped = 0;
		}

		/* Now we have a bunch of nodes in reverse version
		   order, in the tree at ver_root. Most of the time,
		   there'll actually be only one node in the 'tree',
		   in fact. */
		this = tn_last(&ver_root);

		while (this) {
			struct jffs2_tmp_dnode_info *vers_next;
			int ret;
			vers_next = tn_prev(this);
			eat_last(&ver_root, &this->rb);
			if (check_tn_node(c, this)) {
				dbg_readinode("node ver %d, 0x%x-0x%x failed CRC\n",
					      this->version, this->fn->ofs,
					      this->fn->ofs+this->fn->size);
				jffs2_kill_tn(c, this);
			} else {
				if (this->version > high_ver) {
					/* Note that this is different from the other
					   highest_version, because this one is only
					   counting _valid_ nodes which could give the
					   latest inode metadata */
					high_ver = this->version;
					rii->latest_ref = this->fn->raw;
				}
				dbg_readinode("Add %p (v %d, 0x%x-0x%x, ov %d) to fragtree\n",
					      this, this->version, this->fn->ofs,
					      this->fn->ofs+this->fn->size, this->overlapped);

				ret = jffs2_add_full_dnode_to_inode(c, f, this->fn);
				if (ret) {
					/* Free the nodes in ver_root; let the caller
					   deal with the rest */
					JFFS2_ERROR("Add node to tree failed %d\n", ret);
					while (1) {
						vers_next = tn_prev(this);
						if (check_tn_node(c, this))
							jffs2_mark_node_obsolete(c, this->fn->raw);
						jffs2_free_full_dnode(this->fn);
						jffs2_free_tmp_dnode_info(this);
						this = vers_next;
						if (!this)
							break;
						eat_last(&ver_root, &vers_next->rb);
					}
					return ret;
				}
				jffs2_free_tmp_dnode_info(this);
			}
			this = vers_next;
		}
	}
	return 0;
}

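/*
 * Tear down a tn tree without recursion: keep descending to a leaf,
 * free it, then step back up to the parent and clear whichever child
 * pointer led to the node just freed, so the walk never revisits it.
 */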
static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
	struct rb_node *this;
	struct jffs2_tmp_dnode_info *tn;

	this = list->rb_node;

	/* Now at bottom of tree */
	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
			jffs2_free_full_dnode(tn->fn);
			jffs2_free_tmp_dnode_info(tn);

			this = rb_parent(this);
			if (!this)
				break;

			if (this->rb_left == &tn->rb)
				this->rb_left = NULL;
			else if (this->rb_right == &tn->rb)
				this->rb_right = NULL;
			else BUG();
		}
	}
	*list = RB_ROOT;
}

static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent *next;

	while (fd) {
		next = fd->next;
		jffs2_free_full_dirent(fd);
		fd = next;
	}
}

/* Returns first valid node after 'ref'. May return 'ref' */
static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
	while (ref && ref->next_in_ino) {
		if (!ref_obsolete(ref))
			return ref;
		dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
		ref = ref->next_in_ino;
	}
	return NULL;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
 *
 * Returns: 0 on success;
 *	    negative error code on failure.
 */
static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
				struct jffs2_raw_dirent *rd, size_t read,
				struct jffs2_readinode_info *rii)
{
	struct jffs2_full_dirent *fd;
	uint32_t crc;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	crc = crc32(0, rd, sizeof(*rd) - 8);
	if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
		JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
			     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {
		struct jffs2_eraseblock *jeb;
		int len;

		/* Sanity check */
		if (unlikely(PAD(rd->nsize + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
				    ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
			jffs2_mark_node_obsolete(c, ref);
			return 0;
		}

		jeb = &c->blocks[ref->flash_offset / c->sector_size];
		len = ref_totlen(c, jeb, ref);

		spin_lock(&c->erase_completion_lock);
		jeb->used_size += len;
		jeb->unchecked_size -= len;
		c->used_size += len;
		c->unchecked_size -= len;
		ref->flash_offset = ref_offset(ref) | dirent_node_state(rd);
		spin_unlock(&c->erase_completion_lock);
	}

	fd = jffs2_alloc_full_dirent(rd->nsize + 1);
	if (unlikely(!fd))
		return -ENOMEM;

	fd->raw = ref;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->type = rd->type;

	if (fd->version > rii->highest_version)
		rii->highest_version = fd->version;

	/* Pick out the mctime of the latest dirent */
	if (fd->version > rii->mctime_ver && je32_to_cpu(rd->mctime)) {
		rii->mctime_ver = fd->version;
		rii->latest_mctime = je32_to_cpu(rd->mctime);
	}

	/*
	 * Copy as much of the name as possible from the raw
	 * dirent we've already read from the flash.
	 */
	if (read > sizeof(*rd))
		memcpy(&fd->name[0], &rd->name[0],
		       min_t(uint32_t, rd->nsize, (read - sizeof(*rd))));

	/* Do we need to copy any more of the name directly from the flash? */
	if (rd->nsize + sizeof(*rd) > read) {
		/* FIXME: point() */
		int err;
		int already = read - sizeof(*rd);

		err = jffs2_flash_read(c, (ref_offset(ref)) + read,
				       rd->nsize - already, &read, &fd->name[already]);
		if (unlikely(read != rd->nsize - already) && likely(!err)) {
			/* Don't leak fd on a short read */
			jffs2_free_full_dirent(fd);
			return -EIO;
		}

		if (unlikely(err)) {
			JFFS2_ERROR("read remainder of name: error %d\n", err);
			jffs2_free_full_dirent(fd);
			return -EIO;
		}
	}

	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->next = NULL;
	fd->name[rd->nsize] = '\0';

	/*
	 * Wheee. We now have a complete jffs2_full_dirent structure, with
	 * the name in it and everything. Link it into the list.
	 */
	jffs2_add_fd_to_list(c, fd, &rii->fds);

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode node is found.
 *
 * Returns: 0 on success (possibly after marking a bad node obsolete);
 *	    negative error code on failure.
 */
static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
			     struct jffs2_raw_inode *rd, int rdlen,
			     struct jffs2_readinode_info *rii)
{
	struct jffs2_tmp_dnode_info *tn;
	uint32_t len, csize;
	int ret = 0;
	uint32_t crc;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	crc = crc32(0, rd, sizeof(*rd) - 8);
	if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
		JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
			     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	tn = jffs2_alloc_tmp_dnode_info();
	if (!tn) {
		JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
		return -ENOMEM;
	}

	tn->partial_crc = 0;
	csize = je32_to_cpu(rd->csize);

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {

		/* Sanity checks */
		if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
		    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_WARNING("inode node at %#08x failed the offset/size sanity checks\n", ref_offset(ref));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto free_out;
		}

		if (jffs2_is_writebuffered(c) && csize != 0) {
			/* At this point we are supposed to check the data CRC
			 * of our unchecked node. But thus far, we do not
			 * know whether the node is valid or obsolete. To
			 * figure this out, we need to walk all the nodes of
			 * the inode and build the inode fragtree. We don't
			 * want to spend time checking the data of nodes which
			 * may later be found to be obsolete. So we put off the
			 * full data CRC checking until we have read all the
			 * inode nodes and have started building the fragtree.
			 *
			 * The fragtree is built starting with the nodes
			 * having the highest version number, so we'll be able
			 * to detect whether a node is valid (i.e., it is not
			 * overlapped by a node with a higher version) or not.
			 * And we'll be able to check only those nodes which
			 * are not obsolete.
			 *
			 * Of course, this optimization only makes sense in
			 * case of NAND flashes (or other flashes with
			 * !jffs2_can_mark_obsolete()), since on NOR flashes
			 * nodes are marked obsolete physically.
			 *
			 * Since NAND flashes (or other flashes with
			 * jffs2_is_writebuffered(c)) are anyway read in
			 * fractions of c->wbuf_pagesize, and we have just read
			 * the node header, it is likely that the starting part
			 * of the node data was also read when we read the
			 * header. So we don't mind checking the CRC of the
			 * starting part of the data of the node now, and
			 * checking the second part later (in
			 * jffs2_check_node_data()). Of course, we will not
			 * need to re-read and re-check the NAND page which we
			 * have just read. This is why we read the whole NAND
			 * page in jffs2_get_inode_nodes(), while we needed
			 * only the node header.
			 */
			unsigned char *buf;

			/* 'buf' will point to the start of data */
			buf = (unsigned char *)rd + sizeof(*rd);
			/* len will be the read data length */
			len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
			tn->partial_crc = crc32(0, buf, len);

			dbg_readinode("Calculated CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);

			/* If we actually calculated the whole data CRC
			 * and it is wrong, drop the node. */
			if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
				JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
					     ref_offset(ref), je32_to_cpu(rd->data_crc), tn->partial_crc);
				jffs2_mark_node_obsolete(c, ref);
				goto free_out;
			}

		} else if (csize == 0) {
			/*
			 * We checked the header CRC. If the node has no data, adjust
			 * the space accounting now. For other nodes this will be done
			 * later, either when the node is marked obsolete or when its
			 * data is checked.
			 */
			struct jffs2_eraseblock *jeb;

			dbg_readinode("the node has no data.\n");
			jeb = &c->blocks[ref->flash_offset / c->sector_size];
			len = ref_totlen(c, jeb, ref);

			spin_lock(&c->erase_completion_lock);
			jeb->used_size += len;
			jeb->unchecked_size -= len;
			c->used_size += len;
			c->unchecked_size -= len;
			ref->flash_offset = ref_offset(ref) | REF_NORMAL;
			spin_unlock(&c->erase_completion_lock);
		}
	}

	tn->fn = jffs2_alloc_full_dnode();
	if (!tn->fn) {
		JFFS2_ERROR("alloc fn failed\n");
		ret = -ENOMEM;
		goto free_out;
	}

	tn->version = je32_to_cpu(rd->version);
	tn->fn->ofs = je32_to_cpu(rd->offset);
	tn->data_crc = je32_to_cpu(rd->data_crc);
	tn->csize = csize;
	tn->fn->raw = ref;
	tn->overlapped = 0;

	if (tn->version > rii->highest_version)
		rii->highest_version = tn->version;

	/* There was a bug where we wrote hole nodes out with
	   csize/dsize swapped. Deal with it */
	if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
		tn->fn->size = csize;
	else // normal case...
		tn->fn->size = je32_to_cpu(rd->dsize);

	dbg_readinode2("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
		       ref_offset(ref), je32_to_cpu(rd->version),
		       je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);

	ret = jffs2_add_tn_to_tree(c, rii, tn);

	if (ret) {
		jffs2_free_full_dnode(tn->fn);
	free_out:
		jffs2_free_tmp_dnode_info(tn);
		return ret;
	}
#ifdef JFFS2_DBG_READINODE2_MESSAGES
	dbg_readinode2("After adding ver %d:\n", je32_to_cpu(rd->version));
	tn = tn_first(&rii->tn_root);
	while (tn) {
		dbg_readinode2("%p: v %d r 0x%x-0x%x ov %d\n",
			       tn, tn->version, tn->fn->ofs,
			       tn->fn->ofs+tn->fn->size, tn->overlapped);
		tn = tn_next(tn);
	}
#endif
	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an unknown node is found.
 *
 * Returns: 0 on success;
 *	    negative error code on failure.
 */
static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
{
	/* We don't mark unknown nodes as REF_UNCHECKED */
	if (ref_flags(ref) == REF_UNCHECKED) {
		JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n",
			    ref_offset(ref));
		JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n",
			    je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
			    je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc));
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));

	switch (je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {

	case JFFS2_FEATURE_INCOMPAT:
		JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
			    je16_to_cpu(un->nodetype), ref_offset(ref));
		/* EEP */
		BUG();
		break;

	case JFFS2_FEATURE_ROCOMPAT:
		JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
			    je16_to_cpu(un->nodetype), ref_offset(ref));
		BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
		break;

	case JFFS2_FEATURE_RWCOMPAT_COPY:
		JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
			     je16_to_cpu(un->nodetype), ref_offset(ref));
		break;

	case JFFS2_FEATURE_RWCOMPAT_DELETE:
		JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
			     je16_to_cpu(un->nodetype), ref_offset(ref));
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * The function detects whether more data should be read and reads it if so.
 *
 * Returns: 0 on success;
 *	    negative error code on failure.
 */
static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
		     int needed_len, int *rdlen, unsigned char *buf)
{
	int err, to_read = needed_len - *rdlen;
	size_t retlen;
	uint32_t offs;

	if (jffs2_is_writebuffered(c)) {
		int rem = to_read % c->wbuf_pagesize;

		if (rem)
			to_read += c->wbuf_pagesize - rem;
	}
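	/*
	 * The initial header read was already rounded up to end on a min.
	 * I/O unit boundary, so this extra read starts on one too; padding
	 * to_read to whole units (e.g. a 20-byte shortfall becomes one full
	 * 512-byte page -- illustrative values only) means the same min.
	 * I/O unit never has to be re-read later.
	 */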

	/* We need to read more data */
	offs = ref_offset(ref) + *rdlen;

	dbg_readinode("read more %d bytes\n", to_read);

	err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen);
	if (err) {
		JFFS2_ERROR("cannot read %d bytes from 0x%08x, "
			    "error code: %d.\n", to_read, offs, err);
		return err;
	}

	if (retlen < to_read) {
		JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
			    offs, retlen, to_read);
		return -EIO;
	}

	*rdlen += to_read;
	return 0;
}

/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino. Perform a preliminary ordering on data nodes, throwing away
   those which are completely obsoleted by newer ones. The naïve approach we
   used to take, of just returning them _all_ in version order, would cause us
   to run out of memory in certain degenerate cases. */
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100978static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
David Woodhousedf8e96f2007-04-25 03:23:42 +0100979 struct jffs2_readinode_info *rii)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100980{
981 struct jffs2_raw_node_ref *ref, *valid_ref;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100982 unsigned char *buf = NULL;
983 union jffs2_node_union *node;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100984 size_t retlen;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100985 int len, err;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100986
David Woodhousedf8e96f2007-04-25 03:23:42 +0100987 rii->mctime_ver = 0;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000988
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100989 dbg_readinode("ino #%u\n", f->inocache->ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100990
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100991 /* FIXME: in case of NOR and available ->point() this
992 * needs to be fixed. */
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300993 len = sizeof(union jffs2_node_union) + c->wbuf_pagesize;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100994 buf = kmalloc(len, GFP_KERNEL);
995 if (!buf)
996 return -ENOMEM;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000997
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100998 spin_lock(&c->erase_completion_lock);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100999 valid_ref = jffs2_first_valid_node(f->inocache->nodes);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001000 if (!valid_ref && f->inocache->ino != 1)
1001 JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +01001002 while (valid_ref) {
1003 /* We can hold a pointer to a non-obsolete node without the spinlock,
1004 but _obsolete_ nodes may disappear at any time, if the block
1005 they're in gets erased. So if we mark 'ref' obsolete while we're
1006 not holding the lock, it can go away immediately. For that reason,
1007 we find the next valid node first, before processing 'ref'.
1008 */
1009 ref = valid_ref;
1010 valid_ref = jffs2_first_valid_node(ref->next_in_ino);
1011 spin_unlock(&c->erase_completion_lock);
1012
1013 cond_resched();
1014
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001015 /*
1016 * At this point we don't know the type of the node we're going
1017 * to read, so we do not know the size of its header. In order
Artem Bityutskiy10731f82007-04-04 13:59:11 +03001018 * to minimize the amount of flash IO we assume the header is
1019 * of size = JFFS2_MIN_NODE_HEADER.
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001020 */
Artem Bityutskiy10731f82007-04-04 13:59:11 +03001021 len = JFFS2_MIN_NODE_HEADER;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001022 if (jffs2_is_writebuffered(c)) {
Artem Bityutskiy10731f82007-04-04 13:59:11 +03001023 int end, rem;
1024
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001025 /*
Artem Bityutskiy10731f82007-04-04 13:59:11 +03001026 * We are about to read JFFS2_MIN_NODE_HEADER bytes,
1027 * but this flash has some minimal I/O unit. It is
1028 * possible that we'll need to read more soon, so read
1029 * up to the next min. I/O unit, in order not to
1030 * re-read the same min. I/O unit twice.
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001031 */
Artem Bityutskiy10731f82007-04-04 13:59:11 +03001032 end = ref_offset(ref) + len;
1033 rem = end % c->wbuf_pagesize;
1034 if (rem)
1035 end += c->wbuf_pagesize - rem;
1036 len = end - ref_offset(ref);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001037 }
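		/*
		 * For instance (illustrative numbers only, assuming a
		 * wbuf_pagesize of 512): a header read that would end at
		 * offset 0x1FC is extended so it ends exactly at the 0x200
		 * boundary.  Any dirent name or leading data bytes sitting
		 * in that min. I/O unit arrive for free and are reused by
		 * read_direntry()/read_dnode().
		 */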

		dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf);
		if (err) {
			JFFS2_ERROR("cannot read %d bytes from 0x%08x, error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)buf;

		/* No need to mask in the valid bit; it shouldn't be invalid */
		if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
			JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
				     ref_offset(ref), je16_to_cpu(node->u.magic),
				     je16_to_cpu(node->u.nodetype),
				     je32_to_cpu(node->u.totlen),
				     je32_to_cpu(node->u.hdr_crc));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}
		if (je16_to_cpu(node->u.magic) != JFFS2_MAGIC_BITMASK) {
			/* Not a JFFS2 node, whinge and move on */
			JFFS2_NOTICE("Wrong magic bitmask 0x%04x in node header at %#08x.\n",
				     je16_to_cpu(node->u.magic), ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}

		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent) &&
			    len < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_direntry(c, ref, &node->d, retlen, rii);
			if (unlikely(err))
				goto free_out;

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode) &&
			    len < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, len, rii);
			if (unlikely(err))
				goto free_out;

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node) &&
			    len < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (unlikely(err))
				goto free_out;

		}
	cont:
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	kfree(buf);

	f->highest_version = rii->highest_version;

	dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
		      f->inocache->ino, rii->highest_version, rii->latest_mctime,
		      rii->mctime_ver);
	return 0;

free_out:
	jffs2_free_tmp_dnode_info_list(&rii->tn_root);
	jffs2_free_full_dirent_list(rii->fds);
	rii->fds = NULL;
	kfree(buf);
	return err;
}

Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001142static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 struct jffs2_inode_info *f,
1144 struct jffs2_raw_inode *latest_node)
1145{
David Woodhousedf8e96f2007-04-25 03:23:42 +01001146 struct jffs2_readinode_info rii;
David Woodhouse61c4b232007-04-25 17:04:23 +01001147 uint32_t crc, new_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148 size_t retlen;
1149 int ret;
1150
David Woodhouse27c72b02008-05-01 18:47:17 +01001151 dbg_readinode("ino #%u pino/nlink is %d\n", f->inocache->ino,
1152 f->inocache->pino_nlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153
David Woodhousedf8e96f2007-04-25 03:23:42 +01001154 memset(&rii, 0, sizeof(rii));
1155
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156 /* Grab all nodes relevant to this ino */
David Woodhousedf8e96f2007-04-25 03:23:42 +01001157 ret = jffs2_get_inode_nodes(c, f, &rii);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158
1159 if (ret) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001160 JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161 if (f->inocache->state == INO_STATE_READING)
1162 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1163 return ret;
1164 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165
David Woodhousedf8e96f2007-04-25 03:23:42 +01001166 ret = jffs2_build_inode_fragtree(c, f, &rii);
1167 if (ret) {
1168 JFFS2_ERROR("Failed to build final fragtree for inode #%u: error %d\n",
1169 f->inocache->ino, ret);
1170 if (f->inocache->state == INO_STATE_READING)
1171 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1172 jffs2_free_tmp_dnode_info_list(&rii.tn_root);
1173 /* FIXME: We could at least crc-check them all */
1174 if (rii.mdata_tn) {
1175 jffs2_free_full_dnode(rii.mdata_tn->fn);
1176 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1177 rii.mdata_tn = NULL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +01001178 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001179 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180 }
David Woodhousedf8e96f2007-04-25 03:23:42 +01001181
1182 if (rii.mdata_tn) {
1183 if (rii.mdata_tn->fn->raw == rii.latest_ref) {
1184 f->metadata = rii.mdata_tn->fn;
1185 jffs2_free_tmp_dnode_info(rii.mdata_tn);
1186 } else {
1187 jffs2_kill_tn(c, rii.mdata_tn);
1188 }
1189 rii.mdata_tn = NULL;
1190 }
1191
1192 f->dents = rii.fds;
1193
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +01001194 jffs2_dbg_fragtree_paranoia_check_nolock(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195
David Woodhousedf8e96f2007-04-25 03:23:42 +01001196 if (unlikely(!rii.latest_ref)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197 /* No data nodes for this inode. */
1198 if (f->inocache->ino != 1) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001199 JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
David Woodhousedf8e96f2007-04-25 03:23:42 +01001200 if (!rii.fds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 if (f->inocache->state == INO_STATE_READING)
1202 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
1203 return -EIO;
1204 }
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +01001205 JFFS2_NOTICE("but it has children so we fake some modes for it\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 }
1207 latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
1208 latest_node->version = cpu_to_je32(0);
1209 latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
1210 latest_node->isize = cpu_to_je32(0);
1211 latest_node->gid = cpu_to_je16(0);
1212 latest_node->uid = cpu_to_je16(0);
1213 if (f->inocache->state == INO_STATE_READING)
1214 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
1215 return 0;
1216 }
1217
	ret = jffs2_flash_read(c, ref_offset(rii.latest_ref), sizeof(*latest_node), &retlen, (void *)latest_node);
	if (ret || retlen != sizeof(*latest_node)) {
		JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
			    ret, retlen, sizeof(*latest_node));
		/* FIXME: If this fails, there seems to be a memory leak. Find it. */
		mutex_unlock(&f->sem);
		jffs2_do_clear_inode(c, f);
		return ret?ret:-EIO;
	}

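	/* The node CRC covers the whole raw inode header except its last
	   eight bytes, which are the data_crc and node_crc fields
	   themselves. */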
	crc = crc32(0, latest_node, sizeof(*latest_node)-8);
	if (crc != je32_to_cpu(latest_node->node_crc)) {
		JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
			    f->inocache->ino, ref_offset(rii.latest_ref));
		mutex_unlock(&f->sem);
		jffs2_do_clear_inode(c, f);
		return -EIO;
	}

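	/* Per-type fixups: directories may inherit times from a newer
	   dirent, regular files are truncated to the latest isize, and
	   symlinks and device nodes keep their single data node as the
	   metadata node. */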
	switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
	case S_IFDIR:
		if (rii.mctime_ver > je32_to_cpu(latest_node->version)) {
			/* The times in the latest_node are actually older than
			   mctime in the latest dirent. Cheat. */
			latest_node->ctime = latest_node->mtime = cpu_to_je32(rii.latest_mctime);
		}
		break;

	case S_IFREG:
		/* If it was a regular file, truncate it to the latest node's isize */
		new_size = jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
		if (new_size != je32_to_cpu(latest_node->isize)) {
			JFFS2_WARNING("Truncating ino #%u to %d bytes failed because it only had %d bytes to start with!\n",
				      f->inocache->ino, je32_to_cpu(latest_node->isize), new_size);
			latest_node->isize = cpu_to_je32(new_size);
		}
		break;

	case S_IFLNK:
		/* Hack to work around broken isize in old symlink code.
		   Remove this when dwmw2 comes to his senses and stops
		   symlinks from being an entirely gratuitous special
		   case. */
		if (!je32_to_cpu(latest_node->isize))
			latest_node->isize = latest_node->dsize;

		if (f->inocache->state != INO_STATE_CHECKING) {
			/* Symlink's inode data is the target path. Read it and
			 * keep in RAM to facilitate quick follow symlink
			 * operation. */
			f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
			if (!f->target) {
				JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
				mutex_unlock(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ENOMEM;
			}

			ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node),
					       je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);

			if (ret || retlen != je32_to_cpu(latest_node->csize)) {
				if (retlen != je32_to_cpu(latest_node->csize))
					ret = -EIO;
				kfree(f->target);
				f->target = NULL;
				mutex_unlock(&f->sem);
				jffs2_do_clear_inode(c, f);
				return ret;
			}

			f->target[je32_to_cpu(latest_node->csize)] = '\0';
			dbg_readinode("symlink's target '%s' cached\n", f->target);
		}

		/* fall through... */

	case S_IFBLK:
	case S_IFCHR:
		/* Certain inode types should have only one data node, and it's
		   kept as the metadata node */
		if (f->metadata) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			mutex_unlock(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		if (!frag_first(&f->fragtree)) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			mutex_unlock(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* ASSERT: frag_first(&f->fragtree) != NULL */
		if (frag_next(frag_first(&f->fragtree))) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had more than one node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
			mutex_unlock(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* OK. We're happy */
		f->metadata = frag_first(&f->fragtree)->node;
		jffs2_free_node_frag(frag_first(&f->fragtree));
		f->fragtree = RB_ROOT;
		break;
	}
	if (f->inocache->state == INO_STATE_READING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);

	return 0;
}

/* Scan the list of all nodes present for this ino, build map of versions, etc. */
int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			uint32_t ino, struct jffs2_raw_inode *latest_node)
{
	dbg_readinode("read inode #%u\n", ino);

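	/* Look up the inode cache entry under inocache_lock. If another
	   thread is checking or garbage-collecting this inode, sleep on
	   inocache_wq and retry once it has been put back. */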
 retry_inocache:
	spin_lock(&c->inocache_lock);
	f->inocache = jffs2_get_ino_cache(c, ino);

	if (f->inocache) {
		/* Check its state. We may need to wait before we can use it */
		switch(f->inocache->state) {
		case INO_STATE_UNCHECKED:
		case INO_STATE_CHECKEDABSENT:
			f->inocache->state = INO_STATE_READING;
			break;

		case INO_STATE_CHECKING:
		case INO_STATE_GC:
			/* If it's in either of these states, we need
			   to wait for whoever's got it to finish and
			   put it back. */
			dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			goto retry_inocache;

		case INO_STATE_READING:
		case INO_STATE_PRESENT:
			/* Eep. This should never happen. It can
			   happen if Linux calls read_inode() again
			   before clear_inode() has finished though. */
			JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
			/* Fail. That's probably better than allowing it to succeed */
			f->inocache = NULL;
			break;

		default:
			BUG();
		}
	}
	spin_unlock(&c->inocache_lock);

	if (!f->inocache && ino == 1) {
		/* Special case - no root inode on medium */
		f->inocache = jffs2_alloc_inode_cache();
		if (!f->inocache) {
			JFFS2_ERROR("cannot allocate inocache for root inode\n");
			return -ENOMEM;
		}
		dbg_readinode("creating inocache for root inode\n");
		memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
		f->inocache->ino = f->inocache->pino_nlink = 1;
		f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
		f->inocache->state = INO_STATE_READING;
		jffs2_add_ino_cache(c, f->inocache);
	}
	if (!f->inocache) {
		JFFS2_ERROR("requested to read a nonexistent ino %u\n", ino);
		return -ENOENT;
	}

	return jffs2_do_read_inode_internal(c, f, latest_node);
}

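/* CRC-check every node of an inode without exposing it to the VFS: build
   a throwaway jffs2_inode_info, read the inode in (which validates each
   node's CRCs as a side effect), then tear it all down again. */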
int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
	struct jffs2_raw_inode n;
	struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL);
	int ret;

	if (!f)
		return -ENOMEM;

	mutex_init(&f->sem);
	mutex_lock(&f->sem);
	f->inocache = ic;

	ret = jffs2_do_read_inode_internal(c, f, &n);
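	/* Only unlock and clear on success: the error paths inside
	   jffs2_do_read_inode_internal are expected to have done their
	   own unlock and cleanup (see the FIXME there about a possible
	   leak). */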
	if (!ret) {
		mutex_unlock(&f->sem);
		jffs2_do_clear_inode(c, f);
	}
	kfree(f);
	return ret;
}

void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
{
	struct jffs2_full_dirent *fd, *fds;
	int deleted;

	jffs2_xattr_delete_inode(c, f->inocache);
	mutex_lock(&f->sem);
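	/* A pino_nlink of zero means the inode has been unlinked; if so,
	   obsolete its nodes on the medium as the in-core state goes away. */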
	deleted = f->inocache && !f->inocache->pino_nlink;

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);

	if (f->metadata) {
		if (deleted)
			jffs2_mark_node_obsolete(c, f->metadata->raw);
		jffs2_free_full_dnode(f->metadata);
	}

	jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);

	if (f->target) {
		kfree(f->target);
		f->target = NULL;
	}

	fds = f->dents;
	while(fds) {
		fd = fds;
		fds = fd->next;
		jffs2_free_full_dirent(fd);
	}

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		if (f->inocache->nodes == (void *)f->inocache)
			jffs2_del_ino_cache(c, f->inocache);
	}

	mutex_unlock(&f->sem);
}