/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: readinode.c,v 1.132 2005/07/28 14:46:40 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/crc32.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include "nodelist.h"

void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size)
{
	struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size);

	JFFS2_DBG_FRAGTREE("truncating fragtree to 0x%08x bytes\n", size);

	/* We know frag->ofs <= size. That's what lookup does for us */
	if (frag && frag->ofs != size) {
		if (frag->ofs+frag->size >= size) {
			JFFS2_DBG_FRAGTREE2("truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size);
			frag->size = size - frag->ofs;
		}
		frag = frag_next(frag);
	}
	while (frag && frag->ofs >= size) {
		struct jffs2_node_frag *next = frag_next(frag);

		JFFS2_DBG_FRAGTREE("removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size);
		frag_erase(frag, list);
		jffs2_obsolete_node_frag(c, frag);
		frag = next;
	}
}

/*
 * Put a new tmp_dnode_info into the temporary RB-tree, keeping the list in
 * order of increasing version.
 */
static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
{
	struct rb_node **p = &list->rb_node;
	struct rb_node *parent = NULL;
	struct jffs2_tmp_dnode_info *this;

	while (*p) {
		parent = *p;
		this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);

		/* There may actually be a collision here, but it doesn't
		   actually matter. As long as the two nodes with the same
		   version are together, it's all fine. */
		if (tn->version < this->version)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&tn->rb, parent, p);
	rb_insert_color(&tn->rb, list);
}

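/*
 * Free a temporary RB-tree of jffs2_tmp_dnode_info structures by walking it
 * destructively: descend to a leaf, free it, then clear the parent's child
 * pointer so the tree never needs rebalancing.
 */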
static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
	struct rb_node *this;
	struct jffs2_tmp_dnode_info *tn;

	this = list->rb_node;

	/* Now at bottom of tree */
	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
			jffs2_free_full_dnode(tn->fn);
			jffs2_free_tmp_dnode_info(tn);

			this = this->rb_parent;
			if (!this)
				break;

			if (this->rb_left == &tn->rb)
				this->rb_left = NULL;
			else if (this->rb_right == &tn->rb)
				this->rb_right = NULL;
			else BUG();
		}
	}
	list->rb_node = NULL;
}

static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent *next;

	while (fd) {
		next = fd->next;
		jffs2_free_full_dirent(fd);
		fd = next;
	}
}

/* Returns first valid node after 'ref'. May return 'ref' */
static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
	while (ref && ref->next_in_ino) {
		if (!ref_obsolete(ref))
			return ref;
		JFFS2_DBG_NODEREF("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
		ref = ref->next_in_ino;
	}
	return NULL;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int
read_direntry(struct jffs2_sb_info *c,
	      struct jffs2_raw_node_ref *ref,
	      struct jffs2_raw_dirent *rd,
	      uint32_t read,
	      struct jffs2_full_dirent **fdp,
	      int32_t *latest_mctime,
	      uint32_t *mctime_ver)
{
	struct jffs2_full_dirent *fd;

	/* The direntry nodes are checked during the flash scanning */
	BUG_ON(ref_flags(ref) == REF_UNCHECKED);
	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	/* Sanity check */
	if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
		JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
			    ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
		return 1;
	}

	fd = jffs2_alloc_full_dirent(rd->nsize + 1);
	if (unlikely(!fd))
		return -ENOMEM;

	fd->raw = ref;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->type = rd->type;

	/* Pick out the mctime of the latest dirent */
	if (fd->version > *mctime_ver) {
		*mctime_ver = fd->version;
		*latest_mctime = je32_to_cpu(rd->mctime);
	}

	/*
	 * Copy as much of the name as possible from the raw
	 * dirent we've already read from the flash.
	 */
	if (read > sizeof(*rd))
		memcpy(&fd->name[0], &rd->name[0],
		       min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));

	/* Do we need to copy any more of the name directly from the flash? */
	if (rd->nsize + sizeof(*rd) > read) {
		/* FIXME: point() */
		int err;
		int already = read - sizeof(*rd);

		err = jffs2_flash_read(c, (ref_offset(ref)) + read,
				       rd->nsize - already, &read, &fd->name[already]);
		if (unlikely(read != rd->nsize - already) && likely(!err))
			return -EIO;

		if (unlikely(err)) {
			JFFS2_ERROR("read remainder of name: error %d\n", err);
			jffs2_free_full_dirent(fd);
			return -EIO;
		}
	}

	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->next = NULL;
	fd->name[rd->nsize] = '\0';

	/*
	 * Wheee. We now have a complete jffs2_full_dirent structure, with
	 * the name in it and everything. Link it into the list
	 */
	jffs2_add_fd_to_list(c, fd, fdp);

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int
read_dnode(struct jffs2_sb_info *c,
	   struct jffs2_raw_node_ref *ref,
	   struct jffs2_raw_inode *rd,
	   uint32_t read,
	   struct rb_root *tnp,
	   int32_t *latest_mctime,
	   uint32_t *mctime_ver)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_tmp_dnode_info *tn;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {
		uint32_t crc, len;

		crc = crc32(0, rd, sizeof(*rd) - 8);
		if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
			JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
				     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
			return 1;
		}

		/* Sanity checks */
		if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
		    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
			__jffs2_dbg_dump_node(c, ref_offset(ref));
			return 1;
		}

		if (rd->compr != JFFS2_COMPR_ZERO && je32_to_cpu(rd->csize)) {
			unsigned char *buf = NULL;
			uint32_t pointed = 0;
			int err;
#ifndef __ECOS
			if (c->mtd->point) {
				err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize),
						     &read, &buf);
				if (unlikely(read < je32_to_cpu(rd->csize)) && likely(!err)) {
					JFFS2_ERROR("MTD point returned len too short: 0x%zx\n", read);
					c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(*rd),
							je32_to_cpu(rd->csize));
				} else if (unlikely(err)) {
					JFFS2_ERROR("MTD point failed %d\n", err);
				} else
					pointed = 1; /* successfully pointed to device */
			}
#endif
			if (!pointed) {
				buf = kmalloc(je32_to_cpu(rd->csize), GFP_KERNEL);
				if (!buf)
					return -ENOMEM;

				err = jffs2_flash_read(c, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize),
						       &read, buf);
				if (unlikely(read != je32_to_cpu(rd->csize)) && likely(!err))
					err = -EIO;
				if (err) {
					kfree(buf);
					return err;
				}
			}
			crc = crc32(0, buf, je32_to_cpu(rd->csize));
			if (!pointed)
				kfree(buf);
#ifndef __ECOS
			else
				c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize));
#endif

			if (crc != je32_to_cpu(rd->data_crc)) {
				JFFS2_NOTICE("data CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
					     ref_offset(ref), je32_to_cpu(rd->data_crc), crc);
				return 1;
			}

		}

		/* Mark the node as having been checked and fix the accounting accordingly */
		jeb = &c->blocks[ref->flash_offset / c->sector_size];
		len = ref_totlen(c, jeb, ref);

		spin_lock(&c->erase_completion_lock);
		jeb->used_size += len;
		jeb->unchecked_size -= len;
		c->used_size += len;
		c->unchecked_size -= len;

		/* If node covers at least a whole page, or if it starts at the
		   beginning of a page and runs to the end of the file, or if
		   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.

		   If it's actually overlapped, it'll get made NORMAL (or OBSOLETE)
		   when the overlapping node(s) get added to the tree anyway.
		*/
		if ((je32_to_cpu(rd->dsize) >= PAGE_CACHE_SIZE) ||
		    ( ((je32_to_cpu(rd->offset) & (PAGE_CACHE_SIZE-1))==0) &&
		      (je32_to_cpu(rd->dsize) + je32_to_cpu(rd->offset) == je32_to_cpu(rd->isize)))) {
			JFFS2_DBG_READINODE("marking node at %#08x REF_PRISTINE\n", ref_offset(ref));
			ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
		} else {
			JFFS2_DBG_READINODE("marking node at %#08x REF_NORMAL\n", ref_offset(ref));
			ref->flash_offset = ref_offset(ref) | REF_NORMAL;
		}
		spin_unlock(&c->erase_completion_lock);
	}

	tn = jffs2_alloc_tmp_dnode_info();
	if (!tn) {
		JFFS2_ERROR("alloc tn failed\n");
		return -ENOMEM;
	}

	tn->fn = jffs2_alloc_full_dnode();
	if (!tn->fn) {
		JFFS2_ERROR("alloc fn failed\n");
		jffs2_free_tmp_dnode_info(tn);
		return -ENOMEM;
	}

	tn->version = je32_to_cpu(rd->version);
	tn->fn->ofs = je32_to_cpu(rd->offset);
	tn->fn->raw = ref;

	/* There was a bug where we wrote hole nodes out with
	   csize/dsize swapped. Deal with it */
	if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && je32_to_cpu(rd->csize))
		tn->fn->size = je32_to_cpu(rd->csize);
	else // normal case...
		tn->fn->size = je32_to_cpu(rd->dsize);

	JFFS2_DBG_READINODE("dnode @%08x: ver %u, offset %#04x, dsize %#04x\n",
			    ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize));

	jffs2_add_tn_to_tree(tn, tnp);

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an unknown node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int
read_unknown(struct jffs2_sb_info *c,
	     struct jffs2_raw_node_ref *ref,
	     struct jffs2_unknown_node *un,
	     uint32_t read)
{
	/* We don't mark unknown nodes as REF_UNCHECKED */
	BUG_ON(ref_flags(ref) == REF_UNCHECKED);

	un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));

	if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) {
		/* Hmmm. This should have been caught at scan time. */
		JFFS2_NOTICE("node header CRC failed at %#08x. But it must have been OK earlier.\n", ref_offset(ref));
		__jffs2_dbg_dump_node(c, ref_offset(ref));
		return 1;
	} else {
		switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {

		case JFFS2_FEATURE_INCOMPAT:
			JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
				    je16_to_cpu(un->nodetype), ref_offset(ref));
			/* EEP */
			BUG();
			break;

		case JFFS2_FEATURE_ROCOMPAT:
			JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
				    je16_to_cpu(un->nodetype), ref_offset(ref));
			BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
			break;

		case JFFS2_FEATURE_RWCOMPAT_COPY:
			JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
				     je16_to_cpu(un->nodetype), ref_offset(ref));
			break;

		case JFFS2_FEATURE_RWCOMPAT_DELETE:
			JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
				     je16_to_cpu(un->nodetype), ref_offset(ref));
			return 1;
		}
	}

	return 0;
}

/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino, returning the former in order of version */

static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct rb_root *tnp, struct jffs2_full_dirent **fdp,
				 uint32_t *highest_version, uint32_t *latest_mctime,
				 uint32_t *mctime_ver)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	struct rb_root ret_tn = RB_ROOT;
	struct jffs2_full_dirent *ret_fd = NULL;
	union jffs2_node_union node;
	size_t retlen;
	int err;

	*mctime_ver = 0;

	JFFS2_DBG_READINODE("ino #%u\n", f->inocache->ino);

	spin_lock(&c->erase_completion_lock);

	valid_ref = jffs2_first_valid_node(f->inocache->nodes);

	if (!valid_ref && (f->inocache->ino != 1))
		JFFS2_WARNING("no valid nodes for ino #%u\n", f->inocache->ino);

	while (valid_ref) {
		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/* FIXME: point() */
		err = jffs2_flash_read(c, (ref_offset(ref)),
				       min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)),
				       &retlen, (void *)&node);
		if (err) {
			JFFS2_ERROR("error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref));
			goto free_out;
		}

		switch (je16_to_cpu(node.u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:
			JFFS2_DBG_READINODE("node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref));

			if (retlen < sizeof(node.d)) {
				JFFS2_ERROR("short read dirent at %#08x\n", ref_offset(ref));
				err = -EIO;
				goto free_out;
			}

			err = read_direntry(c, ref, &node.d, retlen, &ret_fd, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node.d.version) > *highest_version)
				*highest_version = je32_to_cpu(node.d.version);

			break;

		case JFFS2_NODETYPE_INODE:
			JFFS2_DBG_READINODE("node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref));

			if (retlen < sizeof(node.i)) {
				JFFS2_ERROR("short read dnode at %#08x\n", ref_offset(ref));
				err = -EIO;
				goto free_out;
			}

			err = read_dnode(c, ref, &node.i, retlen, &ret_tn, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node.i.version) > *highest_version)
				*highest_version = je32_to_cpu(node.i.version);

			JFFS2_DBG_READINODE("version %d, highest_version now %d\n",
					    je32_to_cpu(node.i.version), *highest_version);

			break;

		default:
			/* Check we've managed to read at least the common node header */
			if (retlen < sizeof(struct jffs2_unknown_node)) {
				JFFS2_ERROR("short read unknown node at %#08x\n", ref_offset(ref));
				return -EIO;
			}

			err = read_unknown(c, ref, &node.u, retlen);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

		}
		spin_lock(&c->erase_completion_lock);

	}
	spin_unlock(&c->erase_completion_lock);
	*tnp = ret_tn;
	*fdp = ret_fd;

	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(&ret_tn);
	jffs2_free_full_dirent_list(ret_fd);
	return err;
}

static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
					struct jffs2_inode_info *f,
					struct jffs2_raw_inode *latest_node)
{
	struct jffs2_tmp_dnode_info *tn = NULL;
	struct rb_root tn_list;
	struct rb_node *rb, *repl_rb;
	struct jffs2_full_dirent *fd_list;
	struct jffs2_full_dnode *fn = NULL;
	uint32_t crc;
	uint32_t latest_mctime, mctime_ver;
	uint32_t mdata_ver = 0;
	size_t retlen;
	int ret;

	JFFS2_DBG_READINODE("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);

	/* Grab all nodes relevant to this ino */
	ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);

	if (ret) {
		JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		return ret;
	}
	f->dents = fd_list;

	rb = rb_first(&tn_list);

	while (rb) {
		tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb);
		fn = tn->fn;

		if (f->metadata) {
			if (likely(tn->version >= mdata_ver)) {
				JFFS2_DBG_READINODE("obsoleting old metadata at 0x%08x\n", ref_offset(f->metadata->raw));
				jffs2_mark_node_obsolete(c, f->metadata->raw);
				jffs2_free_full_dnode(f->metadata);
				f->metadata = NULL;

				mdata_ver = 0;
			} else {
				/* This should never happen. */
				JFFS2_ERROR("Er. New metadata at 0x%08x with ver %d is actually older than previous ver %d at 0x%08x\n",
					    ref_offset(fn->raw), tn->version, mdata_ver, ref_offset(f->metadata->raw));
				jffs2_mark_node_obsolete(c, fn->raw);
				jffs2_free_full_dnode(fn);
				/* Fill in latest_node from the metadata, not this one we're about to free... */
				fn = f->metadata;
				goto next_tn;
			}
		}

		if (fn->size) {
			jffs2_add_full_dnode_to_inode(c, f, fn);
		} else {
			/* Zero-sized node at end of version list. Just a metadata update */
			JFFS2_DBG_READINODE("metadata @%08x: ver %d\n", ref_offset(fn->raw), tn->version);
			f->metadata = fn;
			mdata_ver = tn->version;
		}
	next_tn:
		BUG_ON(rb->rb_left);
		if (rb->rb_parent && rb->rb_parent->rb_left == rb) {
			/* We were the left-hand child of our parent. We need
			   to move our own right-hand child into our place. */
			repl_rb = rb->rb_right;
			if (repl_rb)
				repl_rb->rb_parent = rb->rb_parent;
		} else
			repl_rb = NULL;

		rb = rb_next(rb);

		/* Remove the spent tn from the tree; don't bother rebalancing
		   but put our right-hand child in our own place. */
		if (tn->rb.rb_parent) {
			if (tn->rb.rb_parent->rb_left == &tn->rb)
				tn->rb.rb_parent->rb_left = repl_rb;
			else if (tn->rb.rb_parent->rb_right == &tn->rb)
				tn->rb.rb_parent->rb_right = repl_rb;
			else BUG();
		} else if (tn->rb.rb_right)
			tn->rb.rb_right->rb_parent = NULL;

		jffs2_free_tmp_dnode_info(tn);
	}
	jffs2_dbg_fragtree_paranoia_check_nolock(f);

	if (!fn) {
		/* No data nodes for this inode. */
		if (f->inocache->ino != 1) {
			JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
			if (!fd_list) {
				if (f->inocache->state == INO_STATE_READING)
					jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
				return -EIO;
			}
			JFFS2_NOTICE("but it has children so we fake some modes for it\n");
		}
		latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
		latest_node->version = cpu_to_je32(0);
		latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
		latest_node->isize = cpu_to_je32(0);
		latest_node->gid = cpu_to_je16(0);
		latest_node->uid = cpu_to_je16(0);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
		return 0;
	}

	ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
	if (ret || retlen != sizeof(*latest_node)) {
		JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
			    ret, retlen, sizeof(*latest_node));
		/* FIXME: If this fails, there seems to be a memory leak. Find it. */
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return ret ? ret : -EIO;
	}

	crc = crc32(0, latest_node, sizeof(*latest_node)-8);
	if (crc != je32_to_cpu(latest_node->node_crc)) {
		JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
			    f->inocache->ino, ref_offset(fn->raw));
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return -EIO;
	}

	switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
	case S_IFDIR:
		if (mctime_ver > je32_to_cpu(latest_node->version)) {
			/* The times in the latest_node are actually older than
			   mctime in the latest dirent. Cheat. */
			latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
		}
		break;


	case S_IFREG:
		/* If it was a regular file, truncate it to the latest node's isize */
		jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
		break;

	case S_IFLNK:
		/* Hack to work around broken isize in old symlink code.
		   Remove this when dwmw2 comes to his senses and stops
		   symlinks from being an entirely gratuitous special
		   case. */
		if (!je32_to_cpu(latest_node->isize))
			latest_node->isize = latest_node->dsize;

		if (f->inocache->state != INO_STATE_CHECKING) {
			/* Symlink's inode data is the target path. Read it and
			 * keep in RAM to facilitate quick follow symlink
			 * operation. */
			f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
			if (!f->target) {
				JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ENOMEM;
			}

			ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node),
					       je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);

			if (ret || retlen != je32_to_cpu(latest_node->csize)) {
				if (retlen != je32_to_cpu(latest_node->csize))
					ret = -EIO;
				kfree(f->target);
				f->target = NULL;
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ret;
			}

			f->target[je32_to_cpu(latest_node->csize)] = '\0';
			JFFS2_DBG_READINODE("symlink's target '%s' cached\n", f->target);
		}

		/* fall through... */

	case S_IFBLK:
	case S_IFCHR:
		/* Certain inode types should have only one data node, and it's
		   kept as the metadata node */
		if (f->metadata) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		if (!frag_first(&f->fragtree)) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* ASSERT: f->fraglist != NULL */
		if (frag_next(frag_first(&f->fragtree))) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* OK. We're happy */
		f->metadata = frag_first(&f->fragtree)->node;
		jffs2_free_node_frag(frag_first(&f->fragtree));
		f->fragtree = RB_ROOT;
		break;
	}
	if (f->inocache->state == INO_STATE_READING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);

	return 0;
}

/* Scan the list of all nodes present for this ino, build map of versions, etc. */
int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			uint32_t ino, struct jffs2_raw_inode *latest_node)
{
	JFFS2_DBG_READINODE("read inode #%u\n", ino);

 retry_inocache:
	spin_lock(&c->inocache_lock);
	f->inocache = jffs2_get_ino_cache(c, ino);

	if (f->inocache) {
		/* Check its state. We may need to wait before we can use it */
		switch(f->inocache->state) {
		case INO_STATE_UNCHECKED:
		case INO_STATE_CHECKEDABSENT:
			f->inocache->state = INO_STATE_READING;
			break;

		case INO_STATE_CHECKING:
		case INO_STATE_GC:
			/* If it's in either of these states, we need
			   to wait for whoever's got it to finish and
			   put it back. */
			JFFS2_DBG_READINODE("waiting for ino #%u in state %d\n", ino, f->inocache->state);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			goto retry_inocache;

		case INO_STATE_READING:
		case INO_STATE_PRESENT:
			/* Eep. This should never happen. It can
			   happen if Linux calls read_inode() again
			   before clear_inode() has finished though. */
			JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
			/* Fail. That's probably better than allowing it to succeed */
			f->inocache = NULL;
			break;

		default:
			BUG();
		}
	}
	spin_unlock(&c->inocache_lock);

	if (!f->inocache && ino == 1) {
		/* Special case - no root inode on medium */
		f->inocache = jffs2_alloc_inode_cache();
		if (!f->inocache) {
			JFFS2_ERROR("cannot allocate inocache for root inode\n");
			return -ENOMEM;
		}
		JFFS2_DBG_READINODE("creating inocache for root inode\n");
		memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
		f->inocache->ino = f->inocache->nlink = 1;
		f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
		f->inocache->state = INO_STATE_READING;
		jffs2_add_ino_cache(c, f->inocache);
	}
	if (!f->inocache) {
		JFFS2_ERROR("requested to read a nonexistent ino %u\n", ino);
		return -ENOENT;
	}

	return jffs2_do_read_inode_internal(c, f, latest_node);
}

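/*
 * CRC-check all the nodes of an inode without instantiating it for the VFS:
 * build a throwaway jffs2_inode_info around the inocache, run the normal
 * read-inode path (which checks any REF_UNCHECKED nodes and fixes the space
 * accounting), then tear the temporary state straight back down.
 */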
int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
	struct jffs2_raw_inode n;
	struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
	int ret;

	if (!f)
		return -ENOMEM;

	memset(f, 0, sizeof(*f));
	init_MUTEX_LOCKED(&f->sem);
	f->inocache = ic;

	ret = jffs2_do_read_inode_internal(c, f, &n);
	if (!ret) {
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
	}
	kfree(f);
	return ret;
}

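/*
 * Release the in-core state built up by jffs2_do_read_inode(): the metadata
 * node, the fragment tree, the cached symlink target and the dirent list.
 * If the inode has been deleted (nlink == 0), its data nodes are also marked
 * obsolete on the way out.
 */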
void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
{
	struct jffs2_full_dirent *fd, *fds;
	int deleted;

	down(&f->sem);
	deleted = f->inocache && !f->inocache->nlink;

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);

	if (f->metadata) {
		if (deleted)
			jffs2_mark_node_obsolete(c, f->metadata->raw);
		jffs2_free_full_dnode(f->metadata);
	}

	jffs2_kill_fragtree(&f->fragtree, deleted ? c : NULL);

	if (f->target) {
		kfree(f->target);
		f->target = NULL;
	}

	fds = f->dents;
	while (fds) {
		fd = fds;
		fds = fd->next;
		jffs2_free_full_dirent(fd);
	}

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		if (f->inocache->nodes == (void *)f->inocache)
			jffs2_del_ino_cache(c, f->inocache);
	}

	up(&f->sem);
}