/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodelist.c,v 1.95 2005/07/05 21:03:07 dwmw2 Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mtd/mtd.h>
#include <linux/rbtree.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include "nodelist.h"

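/* Insert a full_dirent into a dirent list kept sorted by name hash.
   If an entry with the same name already exists, the one with the
   older version is marked obsolete and freed. */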
void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list)
{
	struct jffs2_full_dirent **prev = list;
	D1(printk(KERN_DEBUG "jffs2_add_fd_to_list( %p, %p (->%p))\n", new, list, *list));

	while ((*prev) && (*prev)->nhash <= new->nhash) {
		if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
			/* Duplicate. Free one */
			if (new->version < (*prev)->version) {
				D1(printk(KERN_DEBUG "Eep! Marking new dirent node obsolete\n"));
				D1(printk(KERN_DEBUG "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n", new->name, new->ino, (*prev)->name, (*prev)->ino));
				jffs2_mark_node_obsolete(c, new->raw);
				jffs2_free_full_dirent(new);
			} else {
				D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino));
				new->next = (*prev)->next;
				jffs2_mark_node_obsolete(c, ((*prev)->raw));
				jffs2_free_full_dirent(*prev);
				*prev = new;
			}
			goto out;
		}
		prev = &((*prev)->next);
	}
	new->next = *prev;
	*prev = new;

 out:
	D2(while (*list) {
		printk(KERN_DEBUG "Dirent \"%s\" (hash 0x%08x, ino #%u)\n", (*list)->name, (*list)->nhash, (*list)->ino);
		list = &(*list)->next;
	});
}

/* Put a new tmp_dnode_info into the RB-tree, keeping it in
   order of increasing version
*/

static void jffs2_add_tn_to_list(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
{
	struct rb_node **p = &list->rb_node;
	struct rb_node *parent = NULL;
	struct jffs2_tmp_dnode_info *this;

	while (*p) {
		parent = *p;
		this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);

		if (tn->version < this->version)
			p = &(*p)->rb_left;
		else if (tn->version > this->version)
			p = &(*p)->rb_right;
		else if (tn < this)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&tn->rb, parent, p);
	rb_insert_color(&tn->rb, list);
}

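/* Free a whole tree of tmp_dnode_info structures, including each node's
   full_dnode. The tree is dismantled leaf-first, detaching each freed
   node from its parent; no rebalancing is done since the tree is being
   destroyed anyway. */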
static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
	struct rb_node *this;
	struct jffs2_tmp_dnode_info *tn;

	this = list->rb_node;

	/* Now at bottom of tree */
	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
			jffs2_free_full_dnode(tn->fn);
			jffs2_free_tmp_dnode_info(tn);

			this = this->rb_parent;
			if (!this)
				break;

			if (this->rb_left == &tn->rb)
				this->rb_left = NULL;
			else if (this->rb_right == &tn->rb)
				this->rb_right = NULL;
			else BUG();
		}
	}
	list->rb_node = NULL;
}

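/* Free an entire singly-linked list of full_dirent structures. */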
static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent *next;

	while (fd) {
		next = fd->next;
		jffs2_free_full_dirent(fd);
		fd = next;
	}
}

/* Returns first valid node after 'ref'. May return 'ref' */
static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
	while (ref && ref->next_in_ino) {
		if (!ref_obsolete(ref))
			return ref;
		D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)));
		ref = ref->next_in_ino;
	}
	return NULL;
}

/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino, returning the former in order of version */

int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			  struct rb_root *tnp, struct jffs2_full_dirent **fdp,
			  uint32_t *highest_version, uint32_t *latest_mctime,
			  uint32_t *mctime_ver)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	struct jffs2_tmp_dnode_info *tn;
	struct rb_root ret_tn = RB_ROOT;
	struct jffs2_full_dirent *fd, *ret_fd = NULL;
	union jffs2_node_union node;
	size_t retlen;
	int err;

	*mctime_ver = 0;

	D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino));

	spin_lock(&c->erase_completion_lock);

	valid_ref = jffs2_first_valid_node(f->inocache->nodes);

	if (!valid_ref && (f->inocache->ino != 1))
		printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino);

	while (valid_ref) {
		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/* FIXME: point() */
		err = jffs2_flash_read(c, (ref_offset(ref)),
				       min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)),
				       &retlen, (void *)&node);
		if (err) {
			printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref));
			goto free_out;
		}

		/* Check we've managed to read at least the common node header */
		if (retlen < min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node.u))) {
			printk(KERN_WARNING "short read in get_inode_nodes()\n");
			err = -EIO;
			goto free_out;
		}

		switch (je16_to_cpu(node.u.nodetype)) {
		case JFFS2_NODETYPE_DIRENT:
			D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref)));
			if (ref_flags(ref) == REF_UNCHECKED) {
				printk(KERN_WARNING "BUG: Dirent node at 0x%08x never got checked? How?\n", ref_offset(ref));
				BUG();
			}
			if (retlen < sizeof(node.d)) {
				printk(KERN_WARNING "short read in get_inode_nodes()\n");
				err = -EIO;
				goto free_out;
			}
			/* sanity check */
			if (PAD((node.d.nsize + sizeof (node.d))) != PAD(je32_to_cpu (node.d.totlen))) {
				printk(KERN_NOTICE "jffs2_get_inode_nodes(): Illegal nsize in node at 0x%08x: nsize 0x%02x, totlen %04x\n",
				       ref_offset(ref), node.d.nsize, je32_to_cpu(node.d.totlen));
				jffs2_mark_node_obsolete(c, ref);
				spin_lock(&c->erase_completion_lock);
				continue;
			}
			if (je32_to_cpu(node.d.version) > *highest_version)
				*highest_version = je32_to_cpu(node.d.version);
			if (ref_obsolete(ref)) {
				/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
				printk(KERN_ERR "Dirent node at 0x%08x became obsolete while we weren't looking\n",
				       ref_offset(ref));
				BUG();
			}

			fd = jffs2_alloc_full_dirent(node.d.nsize+1);
			if (!fd) {
				err = -ENOMEM;
				goto free_out;
			}
			fd->raw = ref;
			fd->version = je32_to_cpu(node.d.version);
			fd->ino = je32_to_cpu(node.d.ino);
			fd->type = node.d.type;

			/* Pick out the mctime of the latest dirent */
			if (fd->version > *mctime_ver) {
				*mctime_ver = fd->version;
				*latest_mctime = je32_to_cpu(node.d.mctime);
			}

			/* memcpy as much of the name as possible from the raw
			   dirent we've already read from the flash
			*/
			if (retlen > sizeof(struct jffs2_raw_dirent))
				memcpy(&fd->name[0], &node.d.name[0], min_t(uint32_t, node.d.nsize, (retlen-sizeof(struct jffs2_raw_dirent))));

			/* Do we need to copy any more of the name directly
			   from the flash?
			*/
			if (node.d.nsize + sizeof(struct jffs2_raw_dirent) > retlen) {
				/* FIXME: point() */
				int already = retlen - sizeof(struct jffs2_raw_dirent);

				err = jffs2_flash_read(c, (ref_offset(ref)) + retlen,
						       node.d.nsize - already, &retlen, &fd->name[already]);
				if (!err && retlen != node.d.nsize - already)
					err = -EIO;

				if (err) {
					printk(KERN_WARNING "Read remainder of name in jffs2_get_inode_nodes(): error %d\n", err);
					jffs2_free_full_dirent(fd);
					goto free_out;
				}
			}
			fd->nhash = full_name_hash(fd->name, node.d.nsize);
			fd->next = NULL;
			fd->name[node.d.nsize] = '\0';
			/* Wheee. We now have a complete jffs2_full_dirent structure, with
			   the name in it and everything. Link it into the list
			*/
			D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino));
			jffs2_add_fd_to_list(c, fd, &ret_fd);
			break;

		case JFFS2_NODETYPE_INODE:
			D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref)));
			if (retlen < sizeof(node.i)) {
				printk(KERN_WARNING "read too short for dnode\n");
				err = -EIO;
				goto free_out;
			}
			if (je32_to_cpu(node.i.version) > *highest_version)
				*highest_version = je32_to_cpu(node.i.version);
			D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", je32_to_cpu(node.i.version), *highest_version));

			if (ref_obsolete(ref)) {
				/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
				printk(KERN_ERR "Inode node at 0x%08x became obsolete while we weren't looking\n",
				       ref_offset(ref));
				BUG();
			}

			/* If we've never checked the CRCs on this node, check them now. */
			if (ref_flags(ref) == REF_UNCHECKED) {
				uint32_t crc, len;
				struct jffs2_eraseblock *jeb;

				crc = crc32(0, &node, sizeof(node.i)-8);
				if (crc != je32_to_cpu(node.i.node_crc)) {
					printk(KERN_NOTICE "jffs2_get_inode_nodes(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
					       ref_offset(ref), je32_to_cpu(node.i.node_crc), crc);
					jffs2_mark_node_obsolete(c, ref);
					spin_lock(&c->erase_completion_lock);
					continue;
				}

				/* sanity checks */
				if ( je32_to_cpu(node.i.offset) > je32_to_cpu(node.i.isize) ||
				     PAD(je32_to_cpu(node.i.csize) + sizeof (node.i)) != PAD(je32_to_cpu(node.i.totlen))) {
					printk(KERN_NOTICE "jffs2_get_inode_nodes(): Inode corrupted at 0x%08x, totlen %d, #ino %d, version %d, isize %d, csize %d, dsize %d \n",
					       ref_offset(ref), je32_to_cpu(node.i.totlen), je32_to_cpu(node.i.ino),
					       je32_to_cpu(node.i.version), je32_to_cpu(node.i.isize),
					       je32_to_cpu(node.i.csize), je32_to_cpu(node.i.dsize));
					jffs2_mark_node_obsolete(c, ref);
					spin_lock(&c->erase_completion_lock);
					continue;
				}

				if (node.i.compr != JFFS2_COMPR_ZERO && je32_to_cpu(node.i.csize)) {
					unsigned char *buf = NULL;
					uint32_t pointed = 0;
#ifndef __ECOS
					if (c->mtd->point) {
						err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize),
								     &retlen, &buf);
						if (!err && retlen < je32_to_cpu(node.i.csize)) {
							D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen));
							c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize));
						} else if (err) {
							D1(printk(KERN_DEBUG "MTD point failed %d\n", err));
						} else
							pointed = 1; /* successfully pointed to device */
					}
#endif
					if (!pointed) {
						buf = kmalloc(je32_to_cpu(node.i.csize), GFP_KERNEL);
						if (!buf) {
							err = -ENOMEM;
							goto free_out;
						}

						err = jffs2_flash_read(c, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize),
								       &retlen, buf);
						if (!err && retlen != je32_to_cpu(node.i.csize))
							err = -EIO;
						if (err) {
							kfree(buf);
							goto free_out;
						}
					}
					crc = crc32(0, buf, je32_to_cpu(node.i.csize));
					if (!pointed)
						kfree(buf);
#ifndef __ECOS
					else
						c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize));
#endif

					if (crc != je32_to_cpu(node.i.data_crc)) {
						printk(KERN_NOTICE "jffs2_get_inode_nodes(): Data CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
						       ref_offset(ref), je32_to_cpu(node.i.data_crc), crc);
						jffs2_mark_node_obsolete(c, ref);
						spin_lock(&c->erase_completion_lock);
						continue;
					}

				}

				/* Mark the node as having been checked and fix the accounting accordingly */
				spin_lock(&c->erase_completion_lock);
				jeb = &c->blocks[ref->flash_offset / c->sector_size];
				len = ref_totlen(c, jeb, ref);

				jeb->used_size += len;
				jeb->unchecked_size -= len;
				c->used_size += len;
				c->unchecked_size -= len;

				/* If node covers at least a whole page, or if it starts at the
				   beginning of a page and runs to the end of the file, or if
				   it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.

				   If it's actually overlapped, it'll get made NORMAL (or OBSOLETE)
				   when the overlapping node(s) get added to the tree anyway.
				*/
				if ((je32_to_cpu(node.i.dsize) >= PAGE_CACHE_SIZE) ||
				    ( ((je32_to_cpu(node.i.offset)&(PAGE_CACHE_SIZE-1))==0) &&
				      (je32_to_cpu(node.i.dsize)+je32_to_cpu(node.i.offset) == je32_to_cpu(node.i.isize)))) {
					D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_PRISTINE\n", ref_offset(ref)));
					ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
				} else {
					D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_NORMAL\n", ref_offset(ref)));
					ref->flash_offset = ref_offset(ref) | REF_NORMAL;
				}
				spin_unlock(&c->erase_completion_lock);
			}

			tn = jffs2_alloc_tmp_dnode_info();
			if (!tn) {
				D1(printk(KERN_DEBUG "alloc tn failed\n"));
				err = -ENOMEM;
				goto free_out;
			}

			tn->fn = jffs2_alloc_full_dnode();
			if (!tn->fn) {
				D1(printk(KERN_DEBUG "alloc fn failed\n"));
				err = -ENOMEM;
				jffs2_free_tmp_dnode_info(tn);
				goto free_out;
			}
			tn->version = je32_to_cpu(node.i.version);
			tn->fn->ofs = je32_to_cpu(node.i.offset);
			/* There was a bug where we wrote hole nodes out with
			   csize/dsize swapped. Deal with it */
			if (node.i.compr == JFFS2_COMPR_ZERO && !je32_to_cpu(node.i.dsize) && je32_to_cpu(node.i.csize))
				tn->fn->size = je32_to_cpu(node.i.csize);
			else /* normal case... */
				tn->fn->size = je32_to_cpu(node.i.dsize);
			tn->fn->raw = ref;
			D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %04x, dsize %04x\n",
				  ref_offset(ref), je32_to_cpu(node.i.version),
				  je32_to_cpu(node.i.offset), je32_to_cpu(node.i.dsize)));
			jffs2_add_tn_to_list(tn, &ret_tn);
			break;

		default:
			if (ref_flags(ref) == REF_UNCHECKED) {
				struct jffs2_eraseblock *jeb;
				uint32_t len;

				printk(KERN_ERR "Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n",
				       je16_to_cpu(node.u.nodetype), ref_offset(ref));

				/* Mark the node as having been checked and fix the accounting accordingly */
				spin_lock(&c->erase_completion_lock);
				jeb = &c->blocks[ref->flash_offset / c->sector_size];
				len = ref_totlen(c, jeb, ref);

				jeb->used_size += len;
				jeb->unchecked_size -= len;
				c->used_size += len;
				c->unchecked_size -= len;

				mark_ref_normal(ref);
				spin_unlock(&c->erase_completion_lock);
			}
			node.u.nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(node.u.nodetype));
			if (crc32(0, &node, sizeof(struct jffs2_unknown_node)-4) != je32_to_cpu(node.u.hdr_crc)) {
				/* Hmmm. This should have been caught at scan time. */
				printk(KERN_ERR "Node header CRC failed at %08x. But it must have been OK earlier.\n",
				       ref_offset(ref));
				printk(KERN_ERR "Node was: { %04x, %04x, %08x, %08x }\n",
				       je16_to_cpu(node.u.magic), je16_to_cpu(node.u.nodetype), je32_to_cpu(node.u.totlen),
				       je32_to_cpu(node.u.hdr_crc));
				jffs2_mark_node_obsolete(c, ref);
			} else switch (je16_to_cpu(node.u.nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_INCOMPAT:
				printk(KERN_NOTICE "Unknown INCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
				/* EEP */
				BUG();
				break;
			case JFFS2_FEATURE_ROCOMPAT:
				printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
				if (!(c->flags & JFFS2_SB_FLAG_RO))
					BUG();
				break;
			case JFFS2_FEATURE_RWCOMPAT_COPY:
				printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
				break;
			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref));
				jffs2_mark_node_obsolete(c, ref);
				break;
			}

		}
		spin_lock(&c->erase_completion_lock);

	}
	spin_unlock(&c->erase_completion_lock);
	*tnp = ret_tn;
	*fdp = ret_fd;

	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(&ret_tn);
	jffs2_free_full_dirent_list(ret_fd);
	return err;
}

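/* Change the state of an inode cache entry under inocache_lock and wake
   anyone waiting on c->inocache_wq for the transition. */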
void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state)
{
	spin_lock(&c->inocache_lock);
	ic->state = state;
	wake_up(&c->inocache_wq);
	spin_unlock(&c->inocache_lock);
}

/* During mount, this needs no locking. During normal operation, its
   callers want to do other stuff while still holding the inocache_lock.
   Rather than introducing special case get_ino_cache functions or
   callbacks, we just let the caller do the locking itself. */

struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inode_cache *ret;

	D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino));

	ret = c->inocache_list[ino % INOCACHE_HASHSIZE];
	while (ret && ret->ino < ino) {
		ret = ret->next;
	}

	if (ret && ret->ino != ino)
		ret = NULL;

	D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino));
	return ret;
}

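/* Insert an inode cache entry into its hash bucket, keeping the bucket's
   list sorted by inode number. An ino of zero means a new inode number
   is allocated from c->highest_ino. */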
void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new)
{
	struct jffs2_inode_cache **prev;

	spin_lock(&c->inocache_lock);
	if (!new->ino)
		new->ino = ++c->highest_ino;

	D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino));

	prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE];

	while ((*prev) && (*prev)->ino < new->ino) {
		prev = &(*prev)->next;
	}
	new->next = *prev;
	*prev = new;

	spin_unlock(&c->inocache_lock);
}

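/* Unlink an inode cache entry from its hash bucket and, where safe,
   free it; see the comment below on when the freeing is deferred. */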
void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
{
	struct jffs2_inode_cache **prev;

	D1(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino));
	spin_lock(&c->inocache_lock);

	prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE];

	while ((*prev) && (*prev)->ino < old->ino) {
		prev = &(*prev)->next;
	}
	if ((*prev) == old) {
		*prev = old->next;
	}

	/* Free it now unless it's in READING or CLEARING state, which
	   are the transitions upon read_inode() and clear_inode(). The
	   rest of the time we know nobody else is looking at it, and
	   if it's held by read_inode() or clear_inode() they'll free it
	   for themselves. */
	if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING)
		jffs2_free_inode_cache(old);

	spin_unlock(&c->inocache_lock);
}

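/* Free every inode cache entry in every hash bucket and empty the
   inocache table. */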
void jffs2_free_ino_caches(struct jffs2_sb_info *c)
{
	int i;
	struct jffs2_inode_cache *this, *next;

	for (i=0; i<INOCACHE_HASHSIZE; i++) {
		this = c->inocache_list[i];
		while (this) {
			next = this->next;
			jffs2_free_inode_cache(this);
			this = next;
		}
		c->inocache_list[i] = NULL;
	}
}

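/* Free every raw node reference on every eraseblock and clear each
   block's first_node/last_node pointers. */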
void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
{
	int i;
	struct jffs2_raw_node_ref *this, *next;

	for (i=0; i<c->nr_blocks; i++) {
		this = c->blocks[i].first_node;
		while (this) {
			next = this->next_phys;
			jffs2_free_raw_node_ref(this);
			this = next;
		}
		c->blocks[i].first_node = c->blocks[i].last_node = NULL;
	}
}

struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
{
	/* The common case in lookup is that there will be a node
	   which precisely matches. So we go looking for that first */
	struct rb_node *next;
	struct jffs2_node_frag *prev = NULL;
	struct jffs2_node_frag *frag = NULL;

	D2(printk(KERN_DEBUG "jffs2_lookup_node_frag(%p, %d)\n", fragtree, offset));

	next = fragtree->rb_node;

	while (next) {
		frag = rb_entry(next, struct jffs2_node_frag, rb);

		D2(printk(KERN_DEBUG "Considering frag %d-%d (%p). left %p, right %p\n",
			  frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right));
		if (frag->ofs + frag->size <= offset) {
			D2(printk(KERN_DEBUG "Going right from frag %d-%d, before the region we care about\n",
				  frag->ofs, frag->ofs+frag->size));
			/* Remember the closest smaller match on the way down */
			if (!prev || frag->ofs > prev->ofs)
				prev = frag;
			next = frag->rb.rb_right;
		} else if (frag->ofs > offset) {
			D2(printk(KERN_DEBUG "Going left from frag %d-%d, after the region we care about\n",
				  frag->ofs, frag->ofs+frag->size));
			next = frag->rb.rb_left;
		} else {
			D2(printk(KERN_DEBUG "Returning frag %d,%d, matched\n",
				  frag->ofs, frag->ofs+frag->size));
			return frag;
		}
	}

	/* Exact match not found. Go back up looking at each parent,
	   and return the closest smaller one */

	if (prev)
		D2(printk(KERN_DEBUG "No match. Returning frag %d,%d, closest previous\n",
			  prev->ofs, prev->ofs+prev->size));
	else
		D2(printk(KERN_DEBUG "Returning NULL, empty fragtree\n"));

	return prev;
}

/* Pass 'c' argument to indicate that nodes should be marked obsolete as
   they're killed. */
void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
{
	struct jffs2_node_frag *frag;
	struct jffs2_node_frag *parent;

	if (!root->rb_node)
		return;

	frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));

	while (frag) {
		if (frag->rb.rb_left) {
			D2(printk(KERN_DEBUG "Going left from frag (%p) %d-%d\n",
				  frag, frag->ofs, frag->ofs+frag->size));
			frag = frag_left(frag);
			continue;
		}
		if (frag->rb.rb_right) {
			D2(printk(KERN_DEBUG "Going right from frag (%p) %d-%d\n",
				  frag, frag->ofs, frag->ofs+frag->size));
			frag = frag_right(frag);
			continue;
		}

		D2(printk(KERN_DEBUG "jffs2_kill_fragtree: frag at 0x%x-0x%x: node %p, frags %d--\n",
			  frag->ofs, frag->ofs+frag->size, frag->node,
			  frag->node?frag->node->frags:0));

		if (frag->node && !(--frag->node->frags)) {
			/* Not a hole, and it's the final remaining frag
			   of this node. Free the node */
			if (c)
				jffs2_mark_node_obsolete(c, frag->node->raw);

			jffs2_free_full_dnode(frag->node);
		}
		parent = frag_parent(frag);
		if (parent) {
			if (frag_left(parent) == frag)
				parent->rb.rb_left = NULL;
			else
				parent->rb.rb_right = NULL;
		}

		jffs2_free_node_frag(frag);
		frag = parent;

		cond_resched();
	}
}

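/* Link 'newfrag' into the fragtree below 'base', descending by offset.
   Two fragments with identical offsets are a BUG. Note that this only
   links the new node with rb_link_node(); it does not rebalance the
   tree itself. */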
void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
{
	struct rb_node *parent = &base->rb;
	struct rb_node **link = &parent;

	D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag,
		  newfrag->ofs, newfrag->ofs+newfrag->size, base));

	while (*link) {
		parent = *link;
		base = rb_entry(parent, struct jffs2_node_frag, rb);

		D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs));
		if (newfrag->ofs > base->ofs)
			link = &base->rb.rb_right;
		else if (newfrag->ofs < base->ofs)
			link = &base->rb.rb_left;
		else {
			printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
			BUG();
		}
	}

	rb_link_node(&newfrag->rb, &base->rb, link);
}