/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodelist.c,v 1.99 2005/07/15 10:13:54 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mtd/mtd.h>
#include <linux/rbtree.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include "nodelist.h"

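/*
 * Put a new full_dirent into the per-inode dirent list, keeping the list
 * sorted by name hash. If an entry with the same name is already present,
 * whichever of the two has the lower version is marked obsolete and freed.
 */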
void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list)
{
	struct jffs2_full_dirent **prev = list;
	D1(printk(KERN_DEBUG "jffs2_add_fd_to_list( %p, %p (->%p))\n", new, list, *list));

	while ((*prev) && (*prev)->nhash <= new->nhash) {
		if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
			/* Duplicate. Free one */
			if (new->version < (*prev)->version) {
				D1(printk(KERN_DEBUG "Eep! Marking new dirent node obsolete\n"));
				D1(printk(KERN_DEBUG "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n", new->name, new->ino, (*prev)->name, (*prev)->ino));
				jffs2_mark_node_obsolete(c, new->raw);
				jffs2_free_full_dirent(new);
			} else {
				D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino));
				new->next = (*prev)->next;
				jffs2_mark_node_obsolete(c, ((*prev)->raw));
				jffs2_free_full_dirent(*prev);
				*prev = new;
			}
			goto out;
		}
		prev = &((*prev)->next);
	}
	new->next = *prev;
	*prev = new;

 out:
	D2(while(*list) {
		printk(KERN_DEBUG "Dirent \"%s\" (hash 0x%08x, ino #%u)\n", (*list)->name, (*list)->nhash, (*list)->ino);
		list = &(*list)->next;
	});
}

/*
 * Put a new tmp_dnode_info into the temporary RB-tree, keeping the list in
 * order of increasing version.
 */
static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
{
	struct rb_node **p = &list->rb_node;
	struct rb_node * parent = NULL;
	struct jffs2_tmp_dnode_info *this;

	while (*p) {
		parent = *p;
		this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);

		/* There may actually be a collision here, but it doesn't
		   actually matter. As long as the two nodes with the same
		   version are together, it's all fine. */
		if (tn->version < this->version)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&tn->rb, parent, p);
	rb_insert_color(&tn->rb, list);
}

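/*
 * Free a whole temporary RB-tree of tmp_dnode_info structures, together
 * with the full_dnode each one holds, by walking the tree bottom-up and
 * detaching every node from its parent as it is freed.
 */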
static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
	struct rb_node *this;
	struct jffs2_tmp_dnode_info *tn;

	this = list->rb_node;

	/* Now at bottom of tree */
	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
			jffs2_free_full_dnode(tn->fn);
			jffs2_free_tmp_dnode_info(tn);

			this = this->rb_parent;
			if (!this)
				break;

			if (this->rb_left == &tn->rb)
				this->rb_left = NULL;
			else if (this->rb_right == &tn->rb)
				this->rb_right = NULL;
			else BUG();
		}
	}
	list->rb_node = NULL;
}

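/* Free every entry in a singly-linked list of full_dirent structures. */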
static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent *next;

	while (fd) {
		next = fd->next;
		jffs2_free_full_dirent(fd);
		fd = next;
	}
}

/* Returns first valid node after 'ref'. May return 'ref' */
static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
	while (ref && ref->next_in_ino) {
		if (!ref_obsolete(ref))
			return ref;
		D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)));
		ref = ref->next_in_ino;
	}
	return NULL;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int
read_direntry(struct jffs2_sb_info *c,
	      struct jffs2_raw_node_ref *ref,
	      struct jffs2_raw_dirent *rd,
	      uint32_t read,
	      struct jffs2_full_dirent **fdp,
	      int32_t *latest_mctime,
	      uint32_t *mctime_ver)
{
	struct jffs2_full_dirent *fd;

	/* The direntry nodes are checked during the flash scanning */
	BUG_ON(ref_flags(ref) == REF_UNCHECKED);
	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	/* Sanity check */
	if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
		printk(KERN_ERR "Error! Illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
		       ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
		return 1;
	}

	fd = jffs2_alloc_full_dirent(rd->nsize + 1);
	if (unlikely(!fd))
		return -ENOMEM;

	fd->raw = ref;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->type = rd->type;

	/* Pick out the mctime of the latest dirent */
	if(fd->version > *mctime_ver) {
		*mctime_ver = fd->version;
		*latest_mctime = je32_to_cpu(rd->mctime);
	}

	/*
	 * Copy as much of the name as possible from the raw
	 * dirent we've already read from the flash.
	 */
	if (read > sizeof(*rd))
		memcpy(&fd->name[0], &rd->name[0],
		       min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));

	/* Do we need to copy any more of the name directly from the flash? */
	if (rd->nsize + sizeof(*rd) > read) {
		/* FIXME: point() */
		int err;
		int already = read - sizeof(*rd);

		err = jffs2_flash_read(c, (ref_offset(ref)) + read,
				       rd->nsize - already, &read, &fd->name[already]);
		if (unlikely(read != rd->nsize - already) && likely(!err))
			return -EIO;

		if (unlikely(err)) {
			printk(KERN_WARNING "Read remainder of name in jffs2_get_inode_nodes(): error %d\n", err);
			jffs2_free_full_dirent(fd);
			return -EIO;
		}
	}

	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->next = NULL;
	fd->name[rd->nsize] = '\0';

	/*
	 * Wheee. We now have a complete jffs2_full_dirent structure, with
	 * the name in it and everything. Link it into the list
	 */
	D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino));

	jffs2_add_fd_to_list(c, fd, fdp);

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int
read_dnode(struct jffs2_sb_info *c,
	   struct jffs2_raw_node_ref *ref,
	   struct jffs2_raw_inode *rd,
	   uint32_t read,
	   struct rb_root *tnp,
	   int32_t *latest_mctime,
	   uint32_t *mctime_ver)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_tmp_dnode_info *tn;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {
		uint32_t crc, len;

		crc = crc32(0, rd, sizeof(*rd) - 8);
		if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
			printk(KERN_WARNING "Header CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
			       ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
			return 1;
		}

		/* Sanity checks */
		if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
		    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
			printk(KERN_WARNING "Inode corrupted at %#08x, totlen %d, #ino %d, version %d, "
			       "isize %d, csize %d, dsize %d\n",
			       ref_offset(ref), je32_to_cpu(rd->totlen), je32_to_cpu(rd->ino),
			       je32_to_cpu(rd->version), je32_to_cpu(rd->isize),
			       je32_to_cpu(rd->csize), je32_to_cpu(rd->dsize));
			return 1;
		}

		if (rd->compr != JFFS2_COMPR_ZERO && je32_to_cpu(rd->csize)) {
			unsigned char *buf = NULL;
			uint32_t pointed = 0;
			int err;
#ifndef __ECOS
			if (c->mtd->point) {
				err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize),
						     &read, &buf);
				if (unlikely(read < je32_to_cpu(rd->csize)) && likely(!err)) {
					D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", read));
					c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(*rd),
							je32_to_cpu(rd->csize));
				} else if (unlikely(err)){
					D1(printk(KERN_DEBUG "MTD point failed %d\n", err));
				} else
					pointed = 1; /* successfully pointed to device */
			}
#endif
			if(!pointed){
				buf = kmalloc(je32_to_cpu(rd->csize), GFP_KERNEL);
				if (!buf)
					return -ENOMEM;

				err = jffs2_flash_read(c, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize),
						       &read, buf);
				if (unlikely(read != je32_to_cpu(rd->csize)) && likely(!err))
					err = -EIO;
				if (err) {
					kfree(buf);
					return err;
				}
			}
			crc = crc32(0, buf, je32_to_cpu(rd->csize));
			if(!pointed)
				kfree(buf);
#ifndef __ECOS
			else
				c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(*rd), je32_to_cpu(rd->csize));
#endif

			if (crc != je32_to_cpu(rd->data_crc)) {
				printk(KERN_NOTICE "Data CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
				       ref_offset(ref), je32_to_cpu(rd->data_crc), crc);
				return 1;
			}

		}

		/* Mark the node as having been checked and fix the accounting accordingly */
		jeb = &c->blocks[ref->flash_offset / c->sector_size];
		len = ref_totlen(c, jeb, ref);

		spin_lock(&c->erase_completion_lock);
		jeb->used_size += len;
		jeb->unchecked_size -= len;
		c->used_size += len;
		c->unchecked_size -= len;

		/* If node covers at least a whole page, or if it starts at the
		   beginning of a page and runs to the end of the file, or if
		   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.

		   If it's actually overlapped, it'll get made NORMAL (or OBSOLETE)
		   when the overlapping node(s) get added to the tree anyway.
		*/
		if ((je32_to_cpu(rd->dsize) >= PAGE_CACHE_SIZE) ||
		    ( ((je32_to_cpu(rd->offset) & (PAGE_CACHE_SIZE-1))==0) &&
		      (je32_to_cpu(rd->dsize) + je32_to_cpu(rd->offset) == je32_to_cpu(rd->isize)))) {
			D1(printk(KERN_DEBUG "Marking node at %#08x REF_PRISTINE\n", ref_offset(ref)));
			ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
		} else {
			D1(printk(KERN_DEBUG "Marking node at %#08x REF_NORMAL\n", ref_offset(ref)));
			ref->flash_offset = ref_offset(ref) | REF_NORMAL;
		}
		spin_unlock(&c->erase_completion_lock);
	}

	tn = jffs2_alloc_tmp_dnode_info();
	if (!tn) {
		D1(printk(KERN_DEBUG "alloc tn failed\n"));
		return -ENOMEM;
	}

	tn->fn = jffs2_alloc_full_dnode();
	if (!tn->fn) {
		D1(printk(KERN_DEBUG "alloc fn failed\n"));
		jffs2_free_tmp_dnode_info(tn);
		return -ENOMEM;
	}

	tn->version = je32_to_cpu(rd->version);
	tn->fn->ofs = je32_to_cpu(rd->offset);
	tn->fn->raw = ref;

	/* There was a bug where we wrote hole nodes out with
	   csize/dsize swapped. Deal with it */
	if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && je32_to_cpu(rd->csize))
		tn->fn->size = je32_to_cpu(rd->csize);
	else // normal case...
		tn->fn->size = je32_to_cpu(rd->dsize);

	D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %#04x, dsize %#04x\n",
		  ref_offset(ref), je32_to_cpu(rd->version),
		  je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize)));

	jffs2_add_tn_to_tree(tn, tnp);

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an unknown node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int
read_unknown(struct jffs2_sb_info *c,
	     struct jffs2_raw_node_ref *ref,
	     struct jffs2_unknown_node *un,
	     uint32_t read)
{
	/* We don't mark unknown nodes as REF_UNCHECKED */
	BUG_ON(ref_flags(ref) == REF_UNCHECKED);

	un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));

	if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) {

		/* Hmmm. This should have been caught at scan time. */
		printk(KERN_WARNING "Warning! Node header CRC failed at %#08x. "
		       "But it must have been OK earlier.\n", ref_offset(ref));
		D1(printk(KERN_DEBUG "Node was: { %#04x, %#04x, %#08x, %#08x }\n",
			  je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
			  je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc)));
		return 1;
	} else {
		switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {

		case JFFS2_FEATURE_INCOMPAT:
			printk(KERN_NOTICE "Unknown INCOMPAT nodetype %#04X at %#08x\n",
			       je16_to_cpu(un->nodetype), ref_offset(ref));
			/* EEP */
			BUG();
			break;

		case JFFS2_FEATURE_ROCOMPAT:
			printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %#04X at %#08x\n",
			       je16_to_cpu(un->nodetype), ref_offset(ref));
			BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
			break;

		case JFFS2_FEATURE_RWCOMPAT_COPY:
			printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
			       je16_to_cpu(un->nodetype), ref_offset(ref));
			break;

		case JFFS2_FEATURE_RWCOMPAT_DELETE:
			printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
			       je16_to_cpu(un->nodetype), ref_offset(ref));
			return 1;
		}
	}

	return 0;
}

/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino, returning the former in order of version */

int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			  struct rb_root *tnp, struct jffs2_full_dirent **fdp,
			  uint32_t *highest_version, uint32_t *latest_mctime,
			  uint32_t *mctime_ver)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	struct rb_root ret_tn = RB_ROOT;
	struct jffs2_full_dirent *ret_fd = NULL;
	union jffs2_node_union node;
	size_t retlen;
	int err;

	*mctime_ver = 0;

	D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino));

	spin_lock(&c->erase_completion_lock);

	valid_ref = jffs2_first_valid_node(f->inocache->nodes);

	if (!valid_ref && (f->inocache->ino != 1))
		printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino);

	while (valid_ref) {
		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/* FIXME: point() */
		err = jffs2_flash_read(c, (ref_offset(ref)),
				       min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)),
				       &retlen, (void *)&node);
		if (err) {
			printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref));
			goto free_out;
		}

		switch (je16_to_cpu(node.u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:
			D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref)));

			if (retlen < sizeof(node.d)) {
				printk(KERN_WARNING "Warning! Short read dirent at %#08x\n", ref_offset(ref));
				err = -EIO;
				goto free_out;
			}

			err = read_direntry(c, ref, &node.d, retlen, &ret_fd, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node.d.version) > *highest_version)
				*highest_version = je32_to_cpu(node.d.version);

			break;

		case JFFS2_NODETYPE_INODE:
			D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref)));

			if (retlen < sizeof(node.i)) {
				printk(KERN_WARNING "Warning! Short read dnode at %#08x\n", ref_offset(ref));
				err = -EIO;
				goto free_out;
			}

			err = read_dnode(c, ref, &node.i, retlen, &ret_tn, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node.i.version) > *highest_version)
				*highest_version = je32_to_cpu(node.i.version);

			D1(printk(KERN_DEBUG "version %d, highest_version now %d\n",
				  je32_to_cpu(node.i.version), *highest_version));

			break;

		default:
			/* Check we've managed to read at least the common node header */
			if (retlen < sizeof(struct jffs2_unknown_node)) {
				printk(KERN_WARNING "Warning! Short read unknown node at %#08x\n",
				       ref_offset(ref));
				return -EIO;
			}

			err = read_unknown(c, ref, &node.u, retlen);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

		}
		spin_lock(&c->erase_completion_lock);

	}
	spin_unlock(&c->erase_completion_lock);
	*tnp = ret_tn;
	*fdp = ret_fd;

	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(&ret_tn);
	jffs2_free_full_dirent_list(ret_fd);
	return err;
}

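/* Update the state of an inode cache entry and wake up anybody waiting on it. */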
void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state)
{
	spin_lock(&c->inocache_lock);
	ic->state = state;
	wake_up(&c->inocache_wq);
	spin_unlock(&c->inocache_lock);
}

/* During mount, this needs no locking. During normal operation, its
   callers want to do other stuff while still holding the inocache_lock.
   Rather than introducing special case get_ino_cache functions or
   callbacks, we just let the caller do the locking itself. */

struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inode_cache *ret;

	D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino));

	ret = c->inocache_list[ino % INOCACHE_HASHSIZE];
	while (ret && ret->ino < ino) {
		ret = ret->next;
	}

	if (ret && ret->ino != ino)
		ret = NULL;

	D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino));
	return ret;
}

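/*
 * Insert an inode cache entry into its hash bucket, keeping each bucket's
 * list sorted by inode number. An entry with ino == 0 is given the next
 * unused inode number.
 */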
void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new)
{
	struct jffs2_inode_cache **prev;

	spin_lock(&c->inocache_lock);
	if (!new->ino)
		new->ino = ++c->highest_ino;

	D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino));

	prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE];

	while ((*prev) && (*prev)->ino < new->ino) {
		prev = &(*prev)->next;
	}
	new->next = *prev;
	*prev = new;

	spin_unlock(&c->inocache_lock);
}

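/* Unlink an inode cache entry from its hash bucket and, when it is safe to
   do so, free it. */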
void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
{
	struct jffs2_inode_cache **prev;
	D1(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino));
	spin_lock(&c->inocache_lock);

	prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE];

	while ((*prev) && (*prev)->ino < old->ino) {
		prev = &(*prev)->next;
	}
	if ((*prev) == old) {
		*prev = old->next;
	}

	/* Free it now unless it's in READING or CLEARING state, which
	   are the transitions upon read_inode() and clear_inode(). The
	   rest of the time we know nobody else is looking at it, and
	   if it's held by read_inode() or clear_inode() they'll free it
	   for themselves. */
	if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING)
		jffs2_free_inode_cache(old);

	spin_unlock(&c->inocache_lock);
}

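/* Free every inode cache entry in every hash bucket. */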
void jffs2_free_ino_caches(struct jffs2_sb_info *c)
{
	int i;
	struct jffs2_inode_cache *this, *next;

	for (i=0; i<INOCACHE_HASHSIZE; i++) {
		this = c->inocache_list[i];
		while (this) {
			next = this->next;
			jffs2_free_inode_cache(this);
			this = next;
		}
		c->inocache_list[i] = NULL;
	}
}

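/* Free the raw node reference lists of all eraseblocks. */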
void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
{
	int i;
	struct jffs2_raw_node_ref *this, *next;

	for (i=0; i<c->nr_blocks; i++) {
		this = c->blocks[i].first_node;
		while(this) {
			next = this->next_phys;
			jffs2_free_raw_node_ref(this);
			this = next;
		}
		c->blocks[i].first_node = c->blocks[i].last_node = NULL;
	}
}

struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
{
	/* The common case in lookup is that there will be a node
	   which precisely matches. So we go looking for that first */
	struct rb_node *next;
	struct jffs2_node_frag *prev = NULL;
	struct jffs2_node_frag *frag = NULL;

	D2(printk(KERN_DEBUG "jffs2_lookup_node_frag(%p, %d)\n", fragtree, offset));

	next = fragtree->rb_node;

	while(next) {
		frag = rb_entry(next, struct jffs2_node_frag, rb);

		D2(printk(KERN_DEBUG "Considering frag %d-%d (%p). left %p, right %p\n",
			  frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right));
		if (frag->ofs + frag->size <= offset) {
			D2(printk(KERN_DEBUG "Going right from frag %d-%d, before the region we care about\n",
				  frag->ofs, frag->ofs+frag->size));
			/* Remember the closest smaller match on the way down */
			if (!prev || frag->ofs > prev->ofs)
				prev = frag;
			next = frag->rb.rb_right;
		} else if (frag->ofs > offset) {
			D2(printk(KERN_DEBUG "Going left from frag %d-%d, after the region we care about\n",
				  frag->ofs, frag->ofs+frag->size));
			next = frag->rb.rb_left;
		} else {
			D2(printk(KERN_DEBUG "Returning frag %d,%d, matched\n",
				  frag->ofs, frag->ofs+frag->size));
			return frag;
		}
	}

	/* Exact match not found. Return the closest smaller match we
	   remembered on the way down, if there was one */

	if (prev)
		D2(printk(KERN_DEBUG "No match. Returning frag %d,%d, closest previous\n",
			  prev->ofs, prev->ofs+prev->size));
	else
		D2(printk(KERN_DEBUG "Returning NULL, empty fragtree\n"));

	return prev;
}

/* Pass 'c' argument to indicate that nodes should be marked obsolete as
   they're killed. */
void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
{
	struct jffs2_node_frag *frag;
	struct jffs2_node_frag *parent;

	if (!root->rb_node)
		return;

	frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));

	while(frag) {
		if (frag->rb.rb_left) {
			D2(printk(KERN_DEBUG "Going left from frag (%p) %d-%d\n",
				  frag, frag->ofs, frag->ofs+frag->size));
			frag = frag_left(frag);
			continue;
		}
		if (frag->rb.rb_right) {
			D2(printk(KERN_DEBUG "Going right from frag (%p) %d-%d\n",
				  frag, frag->ofs, frag->ofs+frag->size));
			frag = frag_right(frag);
			continue;
		}

		D2(printk(KERN_DEBUG "jffs2_kill_fragtree: frag at 0x%x-0x%x: node %p, frags %d--\n",
			  frag->ofs, frag->ofs+frag->size, frag->node,
			  frag->node?frag->node->frags:0));

		if (frag->node && !(--frag->node->frags)) {
			/* Not a hole, and it's the final remaining frag
			   of this node. Free the node */
			if (c)
				jffs2_mark_node_obsolete(c, frag->node->raw);

			jffs2_free_full_dnode(frag->node);
		}
		parent = frag_parent(frag);
		if (parent) {
			if (frag_left(parent) == frag)
				parent->rb.rb_left = NULL;
			else
				parent->rb.rb_right = NULL;
		}

		jffs2_free_node_frag(frag);
		frag = parent;

		cond_resched();
	}
}

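/*
 * Link a new fragment into the fragtree below 'base', keyed by its starting
 * offset. Only rb_link_node() is done here; rebalancing is left to the
 * caller.
 */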
void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
{
	struct rb_node *parent = &base->rb;
	struct rb_node **link = &parent;

	D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag,
		  newfrag->ofs, newfrag->ofs+newfrag->size, base));

	while (*link) {
		parent = *link;
		base = rb_entry(parent, struct jffs2_node_frag, rb);

		D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs));
		if (newfrag->ofs > base->ofs)
			link = &base->rb.rb_right;
		else if (newfrag->ofs < base->ofs)
			link = &base->rb.rb_left;
		else {
			printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
			BUG();
		}
	}

	rb_link_node(&newfrag->rb, &base->rb, link);
}