blob: 266423b2709d04be8709164b0dacdbbebc4f22b8 [file] [log] [blame]
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: readinode.c,v 1.143 2005/11/07 11:14:41 gleixner Exp $
 *
 */
13
14#include <linux/kernel.h>
Andrew Lunn737b7662005-07-30 16:29:30 +010015#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/slab.h>
17#include <linux/fs.h>
18#include <linux/crc32.h>
19#include <linux/pagemap.h>
20#include <linux/mtd/mtd.h>
21#include <linux/compiler.h>
22#include "nodelist.h"
23
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010024/*
25 * Put a new tmp_dnode_info into the temporaty RB-tree, keeping the list in
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010026 * order of increasing version.
27 */
28static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -070029{
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010030 struct rb_node **p = &list->rb_node;
31 struct rb_node * parent = NULL;
32 struct jffs2_tmp_dnode_info *this;
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010034 while (*p) {
35 parent = *p;
36 this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010038 /* There may actually be a collision here, but it doesn't
39 actually matter. As long as the two nodes with the same
40 version are together, it's all fine. */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010041 if (tn->version > this->version)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010042 p = &(*p)->rb_left;
43 else
44 p = &(*p)->rb_right;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010045 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010047 rb_link_node(&tn->rb, parent, p);
48 rb_insert_color(&tn->rb, list);
49}
50
/*
 * Free every jffs2_tmp_dnode_info in the temporary RB-tree, along with
 * the full_dnode each one carries, then empty the tree.
 *
 * This is an iterative post-order teardown: descend to a leaf, free it,
 * then clear the corresponding child pointer in the parent so that the
 * parent itself eventually becomes a leaf and can be freed in turn.
 */
static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
	struct rb_node *this;
	struct jffs2_tmp_dnode_info *tn;

	this = list->rb_node;

	/* Now at bottom of tree */
	while (this) {
		if (this->rb_left)
			this = this->rb_left;
		else if (this->rb_right)
			this = this->rb_right;
		else {
			/* 'this' is a leaf: free its payload and itself. */
			tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
			jffs2_free_full_dnode(tn->fn);
			jffs2_free_tmp_dnode_info(tn);

			this = rb_parent(this);
			if (!this)
				break;

			/* Detach the freed leaf from its parent.  NB: tn has
			   already been freed; &tn->rb is used purely as an
			   address for comparison, never dereferenced. */
			if (this->rb_left == &tn->rb)
				this->rb_left = NULL;
			else if (this->rb_right == &tn->rb)
				this->rb_right = NULL;
			else BUG();
		}
	}
	list->rb_node = NULL;
}
82
83static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
84{
85 struct jffs2_full_dirent *next;
86
87 while (fd) {
88 next = fd->next;
89 jffs2_free_full_dirent(fd);
90 fd = next;
91 }
92}
93
/*
 * Returns first valid (non-obsolete) node at or after 'ref' on the
 * per-inode next_in_ino chain.  May return 'ref' itself.  Returns NULL
 * when no valid node remains.
 *
 * NOTE(review): the loop also stops when ref->next_in_ino is NULL, so a
 * ref with a NULL next_in_ino is never returned — presumably that marks
 * the chain terminator rather than a real node; confirm against the
 * nodelist layout in nodelist.h.
 */
static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
{
	while (ref && ref->next_in_ino) {
		if (!ref_obsolete(ref))
			return ref;
		dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
		ref = ref->next_in_ino;
	}
	return NULL;
}
105
106/*
107 * Helper function for jffs2_get_inode_nodes().
108 * It is called every time an directory entry node is found.
109 *
110 * Returns: 0 on succes;
111 * 1 if the node should be marked obsolete;
112 * negative error code on failure.
113 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100114static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
Atsushi Nemoto0ef675d2006-03-09 17:33:38 -0800115 struct jffs2_raw_dirent *rd, size_t read, struct jffs2_full_dirent **fdp,
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100116 uint32_t *latest_mctime, uint32_t *mctime_ver)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100117{
118 struct jffs2_full_dirent *fd;
David Woodhouse1046d882006-06-18 22:44:21 +0100119 uint32_t crc;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000120
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100121 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
122 BUG_ON(ref_obsolete(ref));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000123
David Woodhouse1046d882006-06-18 22:44:21 +0100124 crc = crc32(0, rd, sizeof(*rd) - 8);
125 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
126 JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
127 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100128 return 1;
129 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000130
David Woodhouse1046d882006-06-18 22:44:21 +0100131 /* If we've never checked the CRCs on this node, check them now */
132 if (ref_flags(ref) == REF_UNCHECKED) {
133 struct jffs2_eraseblock *jeb;
134 int len;
135
136 /* Sanity check */
137 if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
138 JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
139 ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
140 return 1;
141 }
142
143 jeb = &c->blocks[ref->flash_offset / c->sector_size];
144 len = ref_totlen(c, jeb, ref);
145
146 spin_lock(&c->erase_completion_lock);
147 jeb->used_size += len;
148 jeb->unchecked_size -= len;
149 c->used_size += len;
150 c->unchecked_size -= len;
151 ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
152 spin_unlock(&c->erase_completion_lock);
153 }
154
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100155 fd = jffs2_alloc_full_dirent(rd->nsize + 1);
156 if (unlikely(!fd))
157 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100159 fd->raw = ref;
160 fd->version = je32_to_cpu(rd->version);
161 fd->ino = je32_to_cpu(rd->ino);
162 fd->type = rd->type;
163
164 /* Pick out the mctime of the latest dirent */
Artem B. Bityutskiy3a69e0c2005-08-17 14:46:26 +0100165 if(fd->version > *mctime_ver && je32_to_cpu(rd->mctime)) {
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100166 *mctime_ver = fd->version;
167 *latest_mctime = je32_to_cpu(rd->mctime);
168 }
169
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000170 /*
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100171 * Copy as much of the name as possible from the raw
172 * dirent we've already read from the flash.
173 */
174 if (read > sizeof(*rd))
175 memcpy(&fd->name[0], &rd->name[0],
176 min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000177
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100178 /* Do we need to copy any more of the name directly from the flash? */
179 if (rd->nsize + sizeof(*rd) > read) {
180 /* FIXME: point() */
181 int err;
182 int already = read - sizeof(*rd);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000183
184 err = jffs2_flash_read(c, (ref_offset(ref)) + read,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100185 rd->nsize - already, &read, &fd->name[already]);
186 if (unlikely(read != rd->nsize - already) && likely(!err))
187 return -EIO;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000188
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100189 if (unlikely(err)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100190 JFFS2_ERROR("read remainder of name: error %d\n", err);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100191 jffs2_free_full_dirent(fd);
192 return -EIO;
193 }
194 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000195
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100196 fd->nhash = full_name_hash(fd->name, rd->nsize);
197 fd->next = NULL;
198 fd->name[rd->nsize] = '\0';
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000199
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100200 /*
201 * Wheee. We now have a complete jffs2_full_dirent structure, with
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000202 * the name in it and everything. Link it into the list
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100203 */
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100204 jffs2_add_fd_to_list(c, fd, fdp);
205
206 return 0;
207}
208
/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode (data) node is found.
 *
 * 'rd' points at the raw inode header already read into the scan buffer;
 * 'rdlen' is how many bytes of it are valid.  On success a new
 * jffs2_tmp_dnode_info is inserted into the temporary tree 'tnp'.
 *
 * Returns: 0 on success;
 * 	    1 if the node should be marked obsolete;
 * 	    negative error code on failure.
 *
 * NOTE(review): latest_mctime/mctime_ver are accepted but not used here,
 * presumably for signature symmetry with read_direntry().
 */
static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
			     struct jffs2_raw_inode *rd, struct rb_root *tnp, int rdlen,
			     uint32_t *latest_mctime, uint32_t *mctime_ver)
{
	struct jffs2_tmp_dnode_info *tn;
	uint32_t len, csize;
	int ret = 1;	/* default outcome on free_out: mark node obsolete */
	uint32_t crc;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	/* Node CRC covers everything up to the two trailing CRC fields. */
	crc = crc32(0, rd, sizeof(*rd) - 8);
	if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
		JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
			     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
		return 1;
	}

	tn = jffs2_alloc_tmp_dnode_info();
	if (!tn) {
		JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
		return -ENOMEM;
	}

	tn->partial_crc = 0;
	csize = je32_to_cpu(rd->csize);

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {

		/* Sanity checks */
		if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
		    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
				JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
				jffs2_dbg_dump_node(c, ref_offset(ref));
			goto free_out;
		}

		if (jffs2_is_writebuffered(c) && csize != 0) {
			/* At this point we are supposed to check the data CRC
			 * of our unchecked node. But thus far, we do not
			 * know whether the node is valid or obsolete. To
			 * figure this out, we need to walk all the nodes of
			 * the inode and build the inode fragtree. We don't
			 * want to spend time checking data of nodes which may
			 * later be found to be obsolete. So we put off the full
			 * data CRC checking until we have read all the inode
			 * nodes and have started building the fragtree.
			 *
			 * The fragtree is being built starting with nodes
			 * having the highest version number, so we'll be able
			 * to detect whether a node is valid (i.e., it is not
			 * overlapped by a node with higher version) or not.
			 * And we'll be able to check only those nodes, which
			 * are not obsolete.
			 *
			 * Of course, this optimization only makes sense in case
			 * of NAND flashes (or other flashes whith
			 * !jffs2_can_mark_obsolete()), since on NOR flashes
			 * nodes are marked obsolete physically.
			 *
			 * Since NAND flashes (or other flashes with
			 * jffs2_is_writebuffered(c)) are anyway read by
			 * fractions of c->wbuf_pagesize, and we have just read
			 * the node header, it is likely that the starting part
			 * of the node data is also read when we read the
			 * header. So we don't mind to check the CRC of the
			 * starting part of the data of the node now, and check
			 * the second part later (in jffs2_check_node_data()).
			 * Of course, we will not need to re-read and re-check
			 * the NAND page which we have just read. This is why we
			 * read the whole NAND page at jffs2_get_inode_nodes(),
			 * while we needed only the node header.
			 */
			unsigned char *buf;

			/* 'buf' will point to the start of data */
			buf = (unsigned char *)rd + sizeof(*rd);
			/* len will be the read data length */
			len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
			tn->partial_crc = crc32(0, buf, len);

			dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);

			/* If we actually calculated the whole data CRC
			 * and it is wrong, drop the node. */
			if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
				JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
					ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
				goto free_out;
			}

		} else if (csize == 0) {
			/*
			 * We checked the header CRC. If the node has no data, adjust
			 * the space accounting now. For other nodes this will be done
			 * later either when the node is marked obsolete or when its
			 * data is checked.
			 */
			struct jffs2_eraseblock *jeb;

			dbg_readinode("the node has no data.\n");
			jeb = &c->blocks[ref->flash_offset / c->sector_size];
			len = ref_totlen(c, jeb, ref);

			spin_lock(&c->erase_completion_lock);
			jeb->used_size += len;
			jeb->unchecked_size -= len;
			c->used_size += len;
			c->unchecked_size -= len;
			ref->flash_offset = ref_offset(ref) | REF_NORMAL;
			spin_unlock(&c->erase_completion_lock);
		}
	}

	tn->fn = jffs2_alloc_full_dnode();
	if (!tn->fn) {
		JFFS2_ERROR("alloc fn failed\n");
		ret = -ENOMEM;
		goto free_out;
	}

	tn->version = je32_to_cpu(rd->version);
	tn->fn->ofs = je32_to_cpu(rd->offset);
	tn->data_crc = je32_to_cpu(rd->data_crc);
	tn->csize = csize;
	tn->fn->raw = ref;

	/* There was a bug where we wrote hole nodes out with
	   csize/dsize swapped. Deal with it */
	if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
		tn->fn->size = csize;
	else // normal case...
		tn->fn->size = je32_to_cpu(rd->dsize);

	dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
		  ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);

	jffs2_add_tn_to_tree(tn, tnp);

	return 0;

free_out:
	jffs2_free_tmp_dnode_info_list(tn);
	return ret;
}
364
365/*
366 * Helper function for jffs2_get_inode_nodes().
367 * It is called every time an unknown node is found.
368 *
David Woodhouse3877f0b2006-06-18 00:05:26 +0100369 * Returns: 0 on success;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100370 * 1 if the node should be marked obsolete;
371 * negative error code on failure.
372 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100373static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100374{
375 /* We don't mark unknown nodes as REF_UNCHECKED */
376 BUG_ON(ref_flags(ref) == REF_UNCHECKED);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000377
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100378 un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
379
David Woodhouse3877f0b2006-06-18 00:05:26 +0100380 switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
381
382 case JFFS2_FEATURE_INCOMPAT:
383 JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
384 je16_to_cpu(un->nodetype), ref_offset(ref));
385 /* EEP */
386 BUG();
387 break;
388
389 case JFFS2_FEATURE_ROCOMPAT:
390 JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
391 je16_to_cpu(un->nodetype), ref_offset(ref));
392 BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
393 break;
394
395 case JFFS2_FEATURE_RWCOMPAT_COPY:
396 JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
397 je16_to_cpu(un->nodetype), ref_offset(ref));
398 break;
399
400 case JFFS2_FEATURE_RWCOMPAT_DELETE:
401 JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
402 je16_to_cpu(un->nodetype), ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100403 return 1;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100404 }
405
406 return 0;
407}
408
/*
 * Helper function for jffs2_get_inode_nodes().
 * The function detects whether more data should be read and reads it if yes.
 *
 * 'right_size' is the header size the caller actually needs; '*rdlen' is
 * how many bytes are already in the buffer.  On a write-buffered flash
 * reads happen in whole wbuf pages (one page, or two when the node header
 * straddles a page boundary); otherwise exactly the missing bytes are
 * read.  '*rdlen' is updated to the new valid length on success.
 *
 * Returns: 0 on success;
 * 	    negative error code on failure.
 */
static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
		     int right_size, int *rdlen, unsigned char *buf, unsigned char *bufstart)
{
	int right_len, err, len;
	size_t retlen;
	uint32_t offs;

	/* Work out how many bytes the buffer should hold in total. */
	if (jffs2_is_writebuffered(c)) {
		right_len = c->wbuf_pagesize - (bufstart - buf);
		if (right_size + (int)(bufstart - buf) > c->wbuf_pagesize)
			right_len += c->wbuf_pagesize;
	} else
		right_len = right_size;

	if (*rdlen == right_len)
		return 0;

	/* We need to read more data */
	offs = ref_offset(ref) + *rdlen;
	if (jffs2_is_writebuffered(c)) {
		/* Read the entire second wbuf page. */
		bufstart = buf + c->wbuf_pagesize;
		len = c->wbuf_pagesize;
	} else {
		/* Read only the missing tail of the header. */
		bufstart = buf + *rdlen;
		len = right_size - *rdlen;
	}

	dbg_readinode("read more %d bytes\n", len);

	err = jffs2_flash_read(c, offs, len, &retlen, bufstart);
	if (err) {
		JFFS2_ERROR("can not read %d bytes from 0x%08x, "
			"error code: %d.\n", len, offs, err);
		return err;
	}

	if (retlen < len) {
		JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
				offs, retlen, len);
		return -EIO;
	}

	*rdlen = right_len;

	return 0;
}
462
/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino.  Data nodes go into the temporary RB-tree *tnp (ordered so
   that rb_first() yields the highest version), dirents into the list *fdp.
   *highest_version, *latest_mctime and *mctime_ver are updated as nodes
   are scanned.  Returns 0 on success, negative error code on failure. */
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct rb_root *tnp, struct jffs2_full_dirent **fdp,
				 uint32_t *highest_version, uint32_t *latest_mctime,
				 uint32_t *mctime_ver)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	struct rb_root ret_tn = RB_ROOT;
	struct jffs2_full_dirent *ret_fd = NULL;
	unsigned char *buf = NULL;
	union jffs2_node_union *node;
	size_t retlen;
	int len, err;

	*mctime_ver = 0;

	dbg_readinode("ino #%u\n", f->inocache->ino);

	if (jffs2_is_writebuffered(c)) {
		/*
		 * If we have the write buffer, we assume the minimal I/O unit
		 * is c->wbuf_pagesize. We implement some optimizations which in
		 * this case and we need a temporary buffer of size =
		 * 2*c->wbuf_pagesize bytes (see comments in read_dnode()).
		 * Basically, we want to read not only the node header, but the
		 * whole wbuf (NAND page in case of NAND) or 2, if the node
		 * header overlaps the border between the 2 wbufs.
		 */
		len = 2*c->wbuf_pagesize;
	} else {
		/*
		 * When there is no write buffer, the size of the temporary
		 * buffer is the size of the larges node header.
		 */
		len = sizeof(union jffs2_node_union);
	}

	/* FIXME: in case of NOR and available ->point() this
	 * needs to be fixed. */
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(&c->erase_completion_lock);
	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
	if (!valid_ref && f->inocache->ino != 1)
		JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
	while (valid_ref) {
		unsigned char *bufstart;

		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/*
		 * At this point we don't know the type of the node we're going
		 * to read, so we do not know the size of its header. In order
		 * to minimize the amount of flash IO we assume the node has
		 * size = JFFS2_MIN_NODE_HEADER.
		 */
		if (jffs2_is_writebuffered(c)) {
			/*
			 * We treat 'buf' as 2 adjacent wbufs. We want to
			 * adjust bufstart such as it points to the
			 * beginning of the node within this wbuf.
			 */
			bufstart = buf + (ref_offset(ref) % c->wbuf_pagesize);
			/* We will read either one wbuf or 2 wbufs. */
			len = c->wbuf_pagesize - (bufstart - buf);
			if (JFFS2_MIN_NODE_HEADER + (int)(bufstart - buf) > c->wbuf_pagesize) {
				/* The header spans the border of the first wbuf */
				len += c->wbuf_pagesize;
			}
		} else {
			bufstart = buf;
			len = JFFS2_MIN_NODE_HEADER;
		}

		dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len,
				       &retlen, bufstart);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)bufstart;

		/* No need to mask in the valid bit; it shouldn't be invalid */
		if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
			JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
				     ref_offset(ref), je16_to_cpu(node->u.magic),
				     je16_to_cpu(node->u.nodetype),
				     je32_to_cpu(node->u.totlen),
				     je32_to_cpu(node->u.hdr_crc));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}

		/* Dispatch on node type; each branch first reads the rest of
		   the type-specific header if the minimal read was too short. */
		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->d.version) > *highest_version)
				*highest_version = je32_to_cpu(node->d.version);

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->i.version) > *highest_version)
				*highest_version = je32_to_cpu(node->i.version);

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

		}
	cont:
		/* Re-take the lock before fetching the next valid ref. */
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	*tnp = ret_tn;
	*fdp = ret_fd;
	kfree(buf);

	dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
			f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver);
	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(&ret_tn);
	jffs2_free_full_dirent_list(ret_fd);
	kfree(buf);
	return err;
}
656
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000657static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658 struct jffs2_inode_info *f,
659 struct jffs2_raw_inode *latest_node)
660{
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100661 struct jffs2_tmp_dnode_info *tn;
David Woodhouse9dee7502005-07-05 22:03:10 +0100662 struct rb_root tn_list;
663 struct rb_node *rb, *repl_rb;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664 struct jffs2_full_dirent *fd_list;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100665 struct jffs2_full_dnode *fn, *first_fn = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666 uint32_t crc;
667 uint32_t latest_mctime, mctime_ver;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700668 size_t retlen;
669 int ret;
670
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100671 dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700672
673 /* Grab all nodes relevant to this ino */
674 ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);
675
676 if (ret) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100677 JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678 if (f->inocache->state == INO_STATE_READING)
679 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
680 return ret;
681 }
682 f->dents = fd_list;
683
David Woodhouse9dee7502005-07-05 22:03:10 +0100684 rb = rb_first(&tn_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685
David Woodhouse9dee7502005-07-05 22:03:10 +0100686 while (rb) {
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100687 cond_resched();
David Woodhouse9dee7502005-07-05 22:03:10 +0100688 tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689 fn = tn->fn;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100690 ret = 1;
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100691 dbg_readinode("consider node ver %u, phys offset "
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100692 "%#08x(%d), range %u-%u.\n", tn->version,
693 ref_offset(fn->raw), ref_flags(fn->raw),
694 fn->ofs, fn->ofs + fn->size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695
696 if (fn->size) {
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100697 ret = jffs2_add_older_frag_to_fragtree(c, f, tn);
698 /* TODO: the error code isn't checked, check it */
699 jffs2_dbg_fragtree_paranoia_check_nolock(f);
700 BUG_ON(ret < 0);
701 if (!first_fn && ret == 0)
702 first_fn = fn;
703 } else if (!first_fn) {
704 first_fn = fn;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700705 f->metadata = fn;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100706 ret = 0; /* Prevent freeing the metadata update node */
707 } else
708 jffs2_mark_node_obsolete(c, fn->raw);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000709
David Woodhouse9dee7502005-07-05 22:03:10 +0100710 BUG_ON(rb->rb_left);
David Woodhouse21f1d5f2006-04-21 13:17:57 +0100711 if (rb_parent(rb) && rb_parent(rb)->rb_left == rb) {
David Woodhouse9dee7502005-07-05 22:03:10 +0100712 /* We were then left-hand child of our parent. We need
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100713 * to move our own right-hand child into our place. */
David Woodhouse9dee7502005-07-05 22:03:10 +0100714 repl_rb = rb->rb_right;
715 if (repl_rb)
David Woodhouse21f1d5f2006-04-21 13:17:57 +0100716 rb_set_parent(repl_rb, rb_parent(rb));
David Woodhouse9dee7502005-07-05 22:03:10 +0100717 } else
718 repl_rb = NULL;
719
720 rb = rb_next(rb);
721
722 /* Remove the spent tn from the tree; don't bother rebalancing
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100723 * but put our right-hand child in our own place. */
David Woodhouse21f1d5f2006-04-21 13:17:57 +0100724 if (rb_parent(&tn->rb)) {
725 if (rb_parent(&tn->rb)->rb_left == &tn->rb)
726 rb_parent(&tn->rb)->rb_left = repl_rb;
727 else if (rb_parent(&tn->rb)->rb_right == &tn->rb)
728 rb_parent(&tn->rb)->rb_right = repl_rb;
David Woodhouse9dee7502005-07-05 22:03:10 +0100729 else BUG();
730 } else if (tn->rb.rb_right)
David Woodhouse21f1d5f2006-04-21 13:17:57 +0100731 rb_set_parent(tn->rb.rb_right, NULL);
David Woodhouse9dee7502005-07-05 22:03:10 +0100732
Linus Torvalds1da177e2005-04-16 15:20:36 -0700733 jffs2_free_tmp_dnode_info(tn);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100734 if (ret) {
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100735 dbg_readinode("delete dnode %u-%u.\n",
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100736 fn->ofs, fn->ofs + fn->size);
737 jffs2_free_full_dnode(fn);
738 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 }
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +0100740 jffs2_dbg_fragtree_paranoia_check_nolock(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100742 BUG_ON(first_fn && ref_obsolete(first_fn->raw));
743
744 fn = first_fn;
745 if (unlikely(!first_fn)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700746 /* No data nodes for this inode. */
747 if (f->inocache->ino != 1) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100748 JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749 if (!fd_list) {
750 if (f->inocache->state == INO_STATE_READING)
751 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
752 return -EIO;
753 }
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100754 JFFS2_NOTICE("but it has children so we fake some modes for it\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755 }
756 latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
757 latest_node->version = cpu_to_je32(0);
758 latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
759 latest_node->isize = cpu_to_je32(0);
760 latest_node->gid = cpu_to_je16(0);
761 latest_node->uid = cpu_to_je16(0);
762 if (f->inocache->state == INO_STATE_READING)
763 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
764 return 0;
765 }
766
767 ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
768 if (ret || retlen != sizeof(*latest_node)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100769 JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
770 ret, retlen, sizeof(*latest_node));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771 /* FIXME: If this fails, there seems to be a memory leak. Find it. */
772 up(&f->sem);
773 jffs2_do_clear_inode(c, f);
774 return ret?ret:-EIO;
775 }
776
777 crc = crc32(0, latest_node, sizeof(*latest_node)-8);
778 if (crc != je32_to_cpu(latest_node->node_crc)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100779 JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
780 f->inocache->ino, ref_offset(fn->raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700781 up(&f->sem);
782 jffs2_do_clear_inode(c, f);
783 return -EIO;
784 }
785
786 switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
787 case S_IFDIR:
788 if (mctime_ver > je32_to_cpu(latest_node->version)) {
789 /* The times in the latest_node are actually older than
790 mctime in the latest dirent. Cheat. */
791 latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
792 }
793 break;
794
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000795
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 case S_IFREG:
797 /* If it was a regular file, truncate it to the latest node's isize */
Artem B. Bityutskiyf302cd02005-07-24 16:29:59 +0100798 jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700799 break;
800
801 case S_IFLNK:
802 /* Hack to work around broken isize in old symlink code.
803 Remove this when dwmw2 comes to his senses and stops
804 symlinks from being an entirely gratuitous special
805 case. */
806 if (!je32_to_cpu(latest_node->isize))
807 latest_node->isize = latest_node->dsize;
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000808
809 if (f->inocache->state != INO_STATE_CHECKING) {
810 /* Symlink's inode data is the target path. Read it and
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100811 * keep in RAM to facilitate quick follow symlink
812 * operation. */
813 f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
814 if (!f->target) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100815 JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000816 up(&f->sem);
817 jffs2_do_clear_inode(c, f);
818 return -ENOMEM;
819 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000820
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000821 ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node),
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100822 je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000823
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000824 if (ret || retlen != je32_to_cpu(latest_node->csize)) {
825 if (retlen != je32_to_cpu(latest_node->csize))
826 ret = -EIO;
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100827 kfree(f->target);
828 f->target = NULL;
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000829 up(&f->sem);
830 jffs2_do_clear_inode(c, f);
831 return -ret;
832 }
833
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100834 f->target[je32_to_cpu(latest_node->csize)] = '\0';
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100835 dbg_readinode("symlink's target '%s' cached\n", f->target);
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000836 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000837
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838 /* fall through... */
839
840 case S_IFBLK:
841 case S_IFCHR:
842 /* Certain inode types should have only one data node, and it's
843 kept as the metadata node */
844 if (f->metadata) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100845 JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700846 f->inocache->ino, jemode_to_cpu(latest_node->mode));
847 up(&f->sem);
848 jffs2_do_clear_inode(c, f);
849 return -EIO;
850 }
851 if (!frag_first(&f->fragtree)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100852 JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853 f->inocache->ino, jemode_to_cpu(latest_node->mode));
854 up(&f->sem);
855 jffs2_do_clear_inode(c, f);
856 return -EIO;
857 }
858 /* ASSERT: f->fraglist != NULL */
859 if (frag_next(frag_first(&f->fragtree))) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100860 JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861 f->inocache->ino, jemode_to_cpu(latest_node->mode));
862 /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
863 up(&f->sem);
864 jffs2_do_clear_inode(c, f);
865 return -EIO;
866 }
867 /* OK. We're happy */
868 f->metadata = frag_first(&f->fragtree)->node;
869 jffs2_free_node_frag(frag_first(&f->fragtree));
870 f->fragtree = RB_ROOT;
871 break;
872 }
873 if (f->inocache->state == INO_STATE_READING)
874 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
875
876 return 0;
877}
878
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100879/* Scan the list of all nodes present for this ino, build map of versions, etc. */
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000880int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100881 uint32_t ino, struct jffs2_raw_inode *latest_node)
882{
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100883 dbg_readinode("read inode #%u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100884
885 retry_inocache:
886 spin_lock(&c->inocache_lock);
887 f->inocache = jffs2_get_ino_cache(c, ino);
888
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100889 if (f->inocache) {
890 /* Check its state. We may need to wait before we can use it */
891 switch(f->inocache->state) {
892 case INO_STATE_UNCHECKED:
893 case INO_STATE_CHECKEDABSENT:
894 f->inocache->state = INO_STATE_READING;
895 break;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000896
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100897 case INO_STATE_CHECKING:
898 case INO_STATE_GC:
899 /* If it's in either of these states, we need
900 to wait for whoever's got it to finish and
901 put it back. */
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100902 dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100903 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
904 goto retry_inocache;
905
906 case INO_STATE_READING:
907 case INO_STATE_PRESENT:
908 /* Eep. This should never happen. It can
909 happen if Linux calls read_inode() again
910 before clear_inode() has finished though. */
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100911 JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100912 /* Fail. That's probably better than allowing it to succeed */
913 f->inocache = NULL;
914 break;
915
916 default:
917 BUG();
918 }
919 }
920 spin_unlock(&c->inocache_lock);
921
922 if (!f->inocache && ino == 1) {
923 /* Special case - no root inode on medium */
924 f->inocache = jffs2_alloc_inode_cache();
925 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100926 JFFS2_ERROR("cannot allocate inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100927 return -ENOMEM;
928 }
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100929 dbg_readinode("creating inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100930 memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
931 f->inocache->ino = f->inocache->nlink = 1;
932 f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
933 f->inocache->state = INO_STATE_READING;
934 jffs2_add_ino_cache(c, f->inocache);
935 }
936 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100937 JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100938 return -ENOENT;
939 }
940
941 return jffs2_do_read_inode_internal(c, f, latest_node);
942}
943
944int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
945{
946 struct jffs2_raw_inode n;
947 struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
948 int ret;
949
950 if (!f)
951 return -ENOMEM;
952
953 memset(f, 0, sizeof(*f));
954 init_MUTEX_LOCKED(&f->sem);
955 f->inocache = ic;
956
957 ret = jffs2_do_read_inode_internal(c, f, &n);
958 if (!ret) {
959 up(&f->sem);
960 jffs2_do_clear_inode(c, f);
961 }
962 kfree (f);
963 return ret;
964}
965
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
967{
968 struct jffs2_full_dirent *fd, *fds;
969 int deleted;
970
KaiGai Koheic7afb0f2006-07-02 15:13:46 +0100971 jffs2_clear_acl(f);
KaiGai Kohei355ed4e2006-06-24 09:15:36 +0900972 jffs2_xattr_delete_inode(c, f->inocache);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973 down(&f->sem);
974 deleted = f->inocache && !f->inocache->nlink;
975
David Woodhouse67e345d2005-02-27 23:01:36 +0000976 if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
977 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);
978
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979 if (f->metadata) {
980 if (deleted)
981 jffs2_mark_node_obsolete(c, f->metadata->raw);
982 jffs2_free_full_dnode(f->metadata);
983 }
984
985 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
986
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100987 if (f->target) {
988 kfree(f->target);
989 f->target = NULL;
990 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000991
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100992 fds = f->dents;
993 while(fds) {
994 fd = fds;
995 fds = fd->next;
996 jffs2_free_full_dirent(fd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997 }
998
David Woodhouse67e345d2005-02-27 23:01:36 +0000999 if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
David Woodhouse67e345d2005-02-27 23:01:36 +00001001 if (f->inocache->nodes == (void *)f->inocache)
1002 jffs2_del_ino_cache(c, f->inocache);
1003 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001004
1005 up(&f->sem);
1006}