/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: readinode.c,v 1.135 2005/08/01 12:05:19 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/crc32.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include "nodelist.h"

/*
 * Put a new tmp_dnode_info into the temporary RB-tree, keeping the list in
 * order of increasing version.
 */
static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
{
	struct rb_node **p = &list->rb_node;
	struct rb_node *parent = NULL;
	struct jffs2_tmp_dnode_info *this;

	while (*p) {
		parent = *p;
		this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);

		/* There may actually be a collision here, but it doesn't
		   actually matter. As long as the two nodes with the same
		   version are together, it's all fine. */
		if (tn->version > this->version)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&tn->rb, parent, p);
	rb_insert_color(&tn->rb, list);
}

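/*
 * Free a temporary RB-tree of tmp_dnode_info structures. The tree is walked
 * iteratively, leaves first, and each node is simply detached from its parent
 * rather than rebalanced, since the whole tree is being discarded.
 */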
static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
	struct rb_node *this;
	struct jffs2_tmp_dnode_info *tn;

	this = list->rb_node;

	/* Now at bottom of tree */
	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
			jffs2_free_full_dnode(tn->fn);
			jffs2_free_tmp_dnode_info(tn);

			this = this->rb_parent;
			if (!this)
				break;

			if (this->rb_left == &tn->rb)
				this->rb_left = NULL;
			else if (this->rb_right == &tn->rb)
				this->rb_right = NULL;
			else BUG();
		}
	}
	list->rb_node = NULL;
}

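/* Free a singly-linked list of jffs2_full_dirent structures. */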
static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent *next;

	while (fd) {
		next = fd->next;
		jffs2_free_full_dirent(fd);
		fd = next;
	}
}

/* Returns first valid node after 'ref'. May return 'ref' */
static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
	while (ref && ref->next_in_ino) {
		if (!ref_obsolete(ref))
			return ref;
		JFFS2_DBG_NODEREF("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
		ref = ref->next_in_ino;
	}
	return NULL;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
				struct jffs2_raw_dirent *rd, uint32_t read, struct jffs2_full_dirent **fdp,
				uint32_t *latest_mctime, uint32_t *mctime_ver)
{
	struct jffs2_full_dirent *fd;

	/* The direntry nodes are checked during the flash scanning */
	BUG_ON(ref_flags(ref) == REF_UNCHECKED);
	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	/* Sanity check */
	if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
		JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
			    ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
		return 1;
	}

	fd = jffs2_alloc_full_dirent(rd->nsize + 1);
	if (unlikely(!fd))
		return -ENOMEM;

	fd->raw = ref;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->type = rd->type;

	/* Pick out the mctime of the latest dirent */
	if (fd->version > *mctime_ver) {
		*mctime_ver = fd->version;
		*latest_mctime = je32_to_cpu(rd->mctime);
	}

	/*
	 * Copy as much of the name as possible from the raw
	 * dirent we've already read from the flash.
	 */
	if (read > sizeof(*rd))
		memcpy(&fd->name[0], &rd->name[0],
		       min_t(uint32_t, rd->nsize, (read - sizeof(*rd))));

	/* Do we need to copy any more of the name directly from the flash? */
	if (rd->nsize + sizeof(*rd) > read) {
		/* FIXME: point() */
		int err;
		int already = read - sizeof(*rd);

		err = jffs2_flash_read(c, (ref_offset(ref)) + read,
				       rd->nsize - already, &read, &fd->name[already]);
		if (unlikely(read != rd->nsize - already) && likely(!err))
			return -EIO;

		if (unlikely(err)) {
			JFFS2_ERROR("read remainder of name: error %d\n", err);
			jffs2_free_full_dirent(fd);
			return -EIO;
		}
	}

	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->next = NULL;
	fd->name[rd->nsize] = '\0';

	/*
	 * Wheee. We now have a complete jffs2_full_dirent structure, with
	 * the name in it and everything. Link it into the list
	 */
	jffs2_add_fd_to_list(c, fd, fdp);

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
			     struct jffs2_raw_inode *rd, struct rb_root *tnp, int rdlen,
			     uint32_t *latest_mctime, uint32_t *mctime_ver)
{
	struct jffs2_tmp_dnode_info *tn;
	uint32_t len, csize;
	int ret = 1;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	tn = jffs2_alloc_tmp_dnode_info();
	if (!tn) {
		JFFS2_ERROR("failed to allocate tn (%d bytes).\n", sizeof(*tn));
		return -ENOMEM;
	}

	tn->partial_crc = 0;
	csize = je32_to_cpu(rd->csize);

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {
		uint32_t crc;

		crc = crc32(0, rd, sizeof(*rd) - 8);
		if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
			JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
				     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
			goto free_out;
		}

		/* Sanity checks */
		if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
		    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			goto free_out;
		}

		if (jffs2_is_writebuffered(c) && csize != 0) {
			/* At this point we are supposed to check the data CRC
			 * of our unchecked node. But thus far, we do not
			 * know whether the node is valid or obsolete. To
			 * figure this out, we need to walk all the nodes of
			 * the inode and build the inode fragtree. We don't
			 * want to spend time checking data of nodes which may
			 * later be found to be obsolete. So we put off the full
			 * data CRC checking until we have read all the inode
			 * nodes and have started building the fragtree.
			 *
			 * The fragtree is being built starting with nodes
			 * having the highest version number, so we'll be able
			 * to detect whether a node is valid (i.e., it is not
			 * overlapped by a node with higher version) or not.
			 * And we'll be able to check only those nodes which
			 * are not obsolete.
			 *
			 * Of course, this optimization only makes sense in case
			 * of NAND flashes (or other flashes with
			 * !jffs2_can_mark_obsolete()), since on NOR flashes
			 * nodes are marked obsolete physically.
			 *
			 * Since NAND flashes (or other flashes with
			 * jffs2_is_writebuffered(c)) are anyway read in
			 * fractions of c->wbuf_pagesize, and we have just read
			 * the node header, it is likely that the starting part
			 * of the node data is also read when we read the
			 * header. So we don't mind checking the CRC of the
			 * starting part of the data of the node now, and check
			 * the second part later (in jffs2_check_node_data()).
			 * Of course, we will not need to re-read and re-check
			 * the NAND page which we have just read. This is why we
			 * read the whole NAND page at jffs2_get_inode_nodes(),
			 * while we needed only the node header.
			 */
			unsigned char *buf;

			/* 'buf' will point to the start of data */
			buf = (unsigned char *)rd + sizeof(*rd);
			/* len will be the read data length */
			len = min_t(uint32_t, rdlen - sizeof(*rd), csize);

			if (len)
				tn->partial_crc = crc = crc32(0, buf, len);

			/* If we actually calculated the whole data CRC
			 * and it is wrong, drop the node. */
			if (unlikely(tn->partial_crc
				     != je32_to_cpu(rd->data_crc)) &&
				     len == csize)
				goto free_out;

		} else if (csize == 0) {
			/*
			 * We checked the header CRC. If the node has no data, adjust
			 * the space accounting now. For other nodes this will be done
			 * later either when the node is marked obsolete or when its
			 * data is checked.
			 */
			struct jffs2_eraseblock *jeb;

			JFFS2_DBG_READINODE("the node has no data.\n");
			jeb = &c->blocks[ref->flash_offset / c->sector_size];
			len = ref_totlen(c, jeb, ref);

			spin_lock(&c->erase_completion_lock);
			jeb->used_size += len;
			jeb->unchecked_size -= len;
			c->used_size += len;
			c->unchecked_size -= len;
			ref->flash_offset = ref_offset(ref) | REF_NORMAL;
			spin_unlock(&c->erase_completion_lock);
		}
	}

	tn->fn = jffs2_alloc_full_dnode();
	if (!tn->fn) {
		JFFS2_ERROR("alloc fn failed\n");
		ret = -ENOMEM;
		goto free_out;
	}

	tn->version = je32_to_cpu(rd->version);
	tn->fn->ofs = je32_to_cpu(rd->offset);
	tn->data_crc = je32_to_cpu(rd->data_crc);
	tn->csize = csize;
	tn->fn->raw = ref;

	/* There was a bug where we wrote hole nodes out with
	   csize/dsize swapped. Deal with it */
	if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
		tn->fn->size = csize;
	else /* normal case... */
		tn->fn->size = je32_to_cpu(rd->dsize);

	JFFS2_DBG_READINODE("dnode @%08x: ver %u, offset %#04x, dsize %#04x\n",
			    ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize));

	jffs2_add_tn_to_tree(tn, tnp);

	return 0;

free_out:
	jffs2_free_tmp_dnode_info(tn);
	return ret;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an unknown node is found.
 *
 * Returns: 0 on success;
 *	    1 if the node should be marked obsolete;
 *	    negative error code on failure.
 */
static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
{
	/* We don't mark unknown nodes as REF_UNCHECKED */
	BUG_ON(ref_flags(ref) == REF_UNCHECKED);

	un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));

	if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) {
		/* Hmmm. This should have been caught at scan time. */
		JFFS2_NOTICE("node header CRC failed at %#08x. But it must have been OK earlier.\n", ref_offset(ref));
		jffs2_dbg_dump_node(c, ref_offset(ref));
		return 1;
	} else {
		switch (je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {

		case JFFS2_FEATURE_INCOMPAT:
			JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
				    je16_to_cpu(un->nodetype), ref_offset(ref));
			/* EEP */
			BUG();
			break;

		case JFFS2_FEATURE_ROCOMPAT:
			JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
				    je16_to_cpu(un->nodetype), ref_offset(ref));
			BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
			break;

		case JFFS2_FEATURE_RWCOMPAT_COPY:
			JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
				     je16_to_cpu(un->nodetype), ref_offset(ref));
			break;

		case JFFS2_FEATURE_RWCOMPAT_DELETE:
			JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
				     je16_to_cpu(un->nodetype), ref_offset(ref));
			return 1;
		}
	}

	return 0;
}

/*
 * Helper function for jffs2_get_inode_nodes().
 * The function detects whether more data should be read and reads it if so.
 *
 * Returns: 0 on success;
 *	    negative error code on failure.
 */
static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
		     int right_size, int *rdlen, unsigned char *buf, unsigned char *bufstart)
{
	int right_len, err, len;
	size_t retlen;
	uint32_t offs;

	if (jffs2_is_writebuffered(c)) {
		right_len = c->wbuf_pagesize - (bufstart - buf);
		if (right_size + (int)(bufstart - buf) > c->wbuf_pagesize)
			right_len += c->wbuf_pagesize;
	} else
		right_len = right_size;

	if (*rdlen == right_len)
		return 0;

	/* We need to read more data */
	offs = ref_offset(ref) + *rdlen;
	if (jffs2_is_writebuffered(c)) {
		bufstart = buf + c->wbuf_pagesize;
		len = c->wbuf_pagesize;
	} else {
		bufstart = buf + *rdlen;
		len = right_size - *rdlen;
	}

	JFFS2_DBG_READINODE("read more %d bytes.", len);

	err = jffs2_flash_read(c, offs, len, &retlen, bufstart);
	if (err) {
		JFFS2_ERROR("can not read %d bytes from 0x%08x, "
			    "error code: %d.\n", len, offs, err);
		return err;
	}

	if (retlen < len) {
		JFFS2_ERROR("short read at %#08x: %d instead of %d.\n",
			    offs, retlen, len);
		return -EIO;
	}

	*rdlen = right_len;

	return 0;
}

/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino, returning the former in order of version */

static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct rb_root *tnp, struct jffs2_full_dirent **fdp,
				 uint32_t *highest_version, uint32_t *latest_mctime,
				 uint32_t *mctime_ver)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	struct rb_root ret_tn = RB_ROOT;
	struct jffs2_full_dirent *ret_fd = NULL;
	unsigned char *buf = NULL;
	union jffs2_node_union *node;
	size_t retlen;
	int len, err;

	*mctime_ver = 0;

	JFFS2_DBG_READINODE("ino #%u\n", f->inocache->ino);

	if (jffs2_is_writebuffered(c)) {
		/*
		 * If we have the write buffer, we assume the minimal I/O unit
		 * is c->wbuf_pagesize. We implement some optimizations in
		 * this case and need a temporary buffer of size =
		 * 2*c->wbuf_pagesize bytes (see comments in read_dnode()).
		 * Basically, we want to read not only the node header, but the
		 * whole wbuf (NAND page in case of NAND) or 2, if the node
		 * header overlaps the border between the 2 wbufs.
		 */
		len = 2*c->wbuf_pagesize;
	} else {
		/*
		 * When there is no write buffer, the size of the temporary
		 * buffer is the size of the largest node header.
		 */
		len = sizeof(union jffs2_node_union);
	}

	/* FIXME: in case of NOR and available ->point() this
	 * needs to be fixed. */
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(&c->erase_completion_lock);
	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
	if (!valid_ref && f->inocache->ino != 1)
		JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
	while (valid_ref) {
		unsigned char *bufstart;

		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/*
		 * At this point we don't know the type of the node we're going
		 * to read, so we do not know the size of its header. In order
		 * to minimize the amount of flash IO we assume the node has
		 * size = JFFS2_MIN_NODE_HEADER.
		 */
		if (jffs2_is_writebuffered(c)) {
			/*
			 * We treat 'buf' as 2 adjacent wbufs. We want to
			 * adjust bufstart so that it points to the
			 * beginning of the node within this wbuf.
			 */
			bufstart = buf + (ref_offset(ref) % c->wbuf_pagesize);
			/* We will read either one wbuf or 2 wbufs. */
			len = c->wbuf_pagesize - (bufstart - buf);
			if (JFFS2_MIN_NODE_HEADER +
			    (int)(bufstart - buf) > c->wbuf_pagesize) {
				/* The header spans the border of the
				 * first wbuf */
				len += c->wbuf_pagesize;
			}
		} else {
			bufstart = buf;
			len = JFFS2_MIN_NODE_HEADER;
		}

		JFFS2_DBG_READINODE("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len,
				       &retlen, bufstart);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)bufstart;

		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->d.version) > *highest_version)
				*highest_version = je32_to_cpu(node->d.version);

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->i.version) > *highest_version)
				*highest_version = je32_to_cpu(node->i.version);

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

		}
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	*tnp = ret_tn;
	*fdp = ret_fd;
	kfree(buf);

	JFFS2_DBG_READINODE("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
			    f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver);
	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(&ret_tn);
	jffs2_free_full_dirent_list(ret_fd);
	kfree(buf);
	return err;
}

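/*
 * Read all nodes of an inode, build its fragtree, pick out the metadata
 * node, and read the latest on-flash jffs2_raw_inode into 'latest_node',
 * fixing up mode/times/size for the various inode types.
 */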
static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
					struct jffs2_inode_info *f,
					struct jffs2_raw_inode *latest_node)
{
	struct jffs2_tmp_dnode_info *tn;
	struct rb_root tn_list;
	struct rb_node *rb, *repl_rb;
	struct jffs2_full_dirent *fd_list;
	struct jffs2_full_dnode *fn, *first_fn = NULL;
	uint32_t crc;
	uint32_t latest_mctime, mctime_ver;
	size_t retlen;
	int ret;

	JFFS2_DBG_READINODE("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);

	/* Grab all nodes relevant to this ino */
	ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);

	if (ret) {
		JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		return ret;
	}
	f->dents = fd_list;

	rb = rb_first(&tn_list);

	while (rb) {
		cond_resched();
		tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb);
		fn = tn->fn;
		ret = 1;
		JFFS2_DBG_READINODE("consider node ver %u, phys offset "
				    "%#08x(%d), range %u-%u.\n", tn->version,
				    ref_offset(fn->raw), ref_flags(fn->raw),
				    fn->ofs, fn->ofs + fn->size);

		if (fn->size) {
			ret = jffs2_add_older_frag_to_fragtree(c, f, tn);
			/* TODO: the error code isn't checked, check it */
			jffs2_dbg_fragtree_paranoia_check_nolock(f);
			BUG_ON(ret < 0);
			if (!first_fn && ret == 0)
				first_fn = fn;
		} else if (!first_fn) {
			first_fn = fn;
			f->metadata = fn;
			ret = 0; /* Prevent freeing the metadata update node */
		} else
			jffs2_mark_node_obsolete(c, fn->raw);

		BUG_ON(rb->rb_left);
		if (rb->rb_parent && rb->rb_parent->rb_left == rb) {
			/* We were the left-hand child of our parent. We need
			 * to move our own right-hand child into our place. */
			repl_rb = rb->rb_right;
			if (repl_rb)
				repl_rb->rb_parent = rb->rb_parent;
		} else
			repl_rb = NULL;

		rb = rb_next(rb);

		/* Remove the spent tn from the tree; don't bother rebalancing
		 * but put our right-hand child in our own place. */
		if (tn->rb.rb_parent) {
			if (tn->rb.rb_parent->rb_left == &tn->rb)
				tn->rb.rb_parent->rb_left = repl_rb;
			else if (tn->rb.rb_parent->rb_right == &tn->rb)
				tn->rb.rb_parent->rb_right = repl_rb;
			else BUG();
		} else if (tn->rb.rb_right)
			tn->rb.rb_right->rb_parent = NULL;

		jffs2_free_tmp_dnode_info(tn);
		if (ret) {
			JFFS2_DBG_READINODE("delete dnode %u-%u.\n",
					    fn->ofs, fn->ofs + fn->size);
			jffs2_free_full_dnode(fn);
		}
	}
	jffs2_dbg_fragtree_paranoia_check_nolock(f);

	BUG_ON(first_fn && ref_obsolete(first_fn->raw));

	fn = first_fn;
	if (unlikely(!first_fn)) {
		/* No data nodes for this inode. */
		if (f->inocache->ino != 1) {
			JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
			if (!fd_list) {
				if (f->inocache->state == INO_STATE_READING)
					jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
				return -EIO;
			}
			JFFS2_NOTICE("but it has children so we fake some modes for it\n");
		}
		latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
		latest_node->version = cpu_to_je32(0);
		latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
		latest_node->isize = cpu_to_je32(0);
		latest_node->gid = cpu_to_je16(0);
		latest_node->uid = cpu_to_je16(0);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
		return 0;
	}

	ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
	if (ret || retlen != sizeof(*latest_node)) {
		JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
			    ret, retlen, sizeof(*latest_node));
		/* FIXME: If this fails, there seems to be a memory leak. Find it. */
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return ret?ret:-EIO;
	}

	crc = crc32(0, latest_node, sizeof(*latest_node)-8);
	if (crc != je32_to_cpu(latest_node->node_crc)) {
		JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
			    f->inocache->ino, ref_offset(fn->raw));
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return -EIO;
	}

	switch (jemode_to_cpu(latest_node->mode) & S_IFMT) {
	case S_IFDIR:
		if (mctime_ver > je32_to_cpu(latest_node->version)) {
			/* The times in the latest_node are actually older than
			   mctime in the latest dirent. Cheat. */
			latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
		}
		break;


	case S_IFREG:
		/* If it was a regular file, truncate it to the latest node's isize */
		jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
		break;

	case S_IFLNK:
		/* Hack to work around broken isize in old symlink code.
		   Remove this when dwmw2 comes to his senses and stops
		   symlinks from being an entirely gratuitous special
		   case. */
		if (!je32_to_cpu(latest_node->isize))
			latest_node->isize = latest_node->dsize;

		if (f->inocache->state != INO_STATE_CHECKING) {
			/* Symlink's inode data is the target path. Read it and
			 * keep it in RAM to facilitate quick follow-symlink
			 * operation. */
			f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
			if (!f->target) {
				JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ENOMEM;
			}

			ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node),
					       je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);

			if (ret || retlen != je32_to_cpu(latest_node->csize)) {
				if (retlen != je32_to_cpu(latest_node->csize))
					ret = -EIO;
				kfree(f->target);
				f->target = NULL;
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ret;
			}

			f->target[je32_to_cpu(latest_node->csize)] = '\0';
			JFFS2_DBG_READINODE("symlink's target '%s' cached\n", f->target);
		}

		/* fall through... */

	case S_IFBLK:
	case S_IFCHR:
		/* Certain inode types should have only one data node, and it's
		   kept as the metadata node */
		if (f->metadata) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		if (!frag_first(&f->fragtree)) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* ASSERT: f->fraglist != NULL */
		if (frag_next(frag_first(&f->fragtree))) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
				    f->inocache->ino, jemode_to_cpu(latest_node->mode));
			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* OK. We're happy */
		f->metadata = frag_first(&f->fragtree)->node;
		jffs2_free_node_frag(frag_first(&f->fragtree));
		f->fragtree = RB_ROOT;
		break;
	}
	if (f->inocache->state == INO_STATE_READING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);

	return 0;
}

/* Scan the list of all nodes present for this ino, build map of versions, etc. */
int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
			uint32_t ino, struct jffs2_raw_inode *latest_node)
{
	JFFS2_DBG_READINODE("read inode #%u\n", ino);

 retry_inocache:
	spin_lock(&c->inocache_lock);
	f->inocache = jffs2_get_ino_cache(c, ino);

	if (f->inocache) {
		/* Check its state. We may need to wait before we can use it */
		switch (f->inocache->state) {
		case INO_STATE_UNCHECKED:
		case INO_STATE_CHECKEDABSENT:
			f->inocache->state = INO_STATE_READING;
			break;

		case INO_STATE_CHECKING:
		case INO_STATE_GC:
			/* If it's in either of these states, we need
			   to wait for whoever's got it to finish and
			   put it back. */
			JFFS2_DBG_READINODE("waiting for ino #%u in state %d\n", ino, f->inocache->state);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			goto retry_inocache;

		case INO_STATE_READING:
		case INO_STATE_PRESENT:
			/* Eep. This should never happen. It can
			   happen if Linux calls read_inode() again
			   before clear_inode() has finished though. */
			JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
			/* Fail. That's probably better than allowing it to succeed */
			f->inocache = NULL;
			break;

		default:
			BUG();
		}
	}
	spin_unlock(&c->inocache_lock);

	if (!f->inocache && ino == 1) {
		/* Special case - no root inode on medium */
		f->inocache = jffs2_alloc_inode_cache();
		if (!f->inocache) {
			JFFS2_ERROR("cannot allocate inocache for root inode\n");
			return -ENOMEM;
		}
		JFFS2_DBG_READINODE("creating inocache for root inode\n");
		memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
		f->inocache->ino = f->inocache->nlink = 1;
		f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
		f->inocache->state = INO_STATE_READING;
		jffs2_add_ino_cache(c, f->inocache);
	}
	if (!f->inocache) {
		JFFS2_ERROR("requested to read a nonexistent ino %u\n", ino);
		return -ENOENT;
	}

	return jffs2_do_read_inode_internal(c, f, latest_node);
}

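/*
 * Check the CRCs of all nodes belonging to an inode cache entry by reading
 * the inode through a temporary jffs2_inode_info, then tearing it down again.
 */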
int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
	struct jffs2_raw_inode n;
	struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
	int ret;

	if (!f)
		return -ENOMEM;

	memset(f, 0, sizeof(*f));
	init_MUTEX_LOCKED(&f->sem);
	f->inocache = ic;

	ret = jffs2_do_read_inode_internal(c, f, &n);
	if (!ret) {
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
	}
	kfree(f);
	return ret;
}

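/*
 * Release everything attached to an in-core inode: the metadata node, the
 * fragtree, the cached symlink target and the dirent list, marking nodes
 * obsolete where the inode has actually been deleted.
 */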
void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
{
	struct jffs2_full_dirent *fd, *fds;
	int deleted;

	down(&f->sem);
	deleted = f->inocache && !f->inocache->nlink;

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);

	if (f->metadata) {
		if (deleted)
			jffs2_mark_node_obsolete(c, f->metadata->raw);
		jffs2_free_full_dnode(f->metadata);
	}

	jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);

	if (f->target) {
		kfree(f->target);
		f->target = NULL;
	}

	fds = f->dents;
	while (fds) {
		fd = fds;
		fds = fd->next;
		jffs2_free_full_dirent(fd);
	}

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		if (f->inocache->nodes == (void *)f->inocache)
			jffs2_del_ino_cache(c, f->inocache);
	}

	up(&f->sem);
}