blob: 717a48cf7df2aadbccde0432024dafde0e80a410 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2001-2003 Red Hat, Inc.
5 *
6 * Created by David Woodhouse <dwmw2@infradead.org>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
Thomas Gleixner182ec4e2005-11-07 11:16:07 +000010 * $Id: readinode.c,v 1.143 2005/11/07 11:14:41 gleixner Exp $
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 *
12 */
13
14#include <linux/kernel.h>
Andrew Lunn737b7662005-07-30 16:29:30 +010015#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/slab.h>
17#include <linux/fs.h>
18#include <linux/crc32.h>
19#include <linux/pagemap.h>
20#include <linux/mtd/mtd.h>
21#include <linux/compiler.h>
22#include "nodelist.h"
23
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010024/*
25 * Put a new tmp_dnode_info into the temporaty RB-tree, keeping the list in
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010026 * order of increasing version.
27 */
28static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -070029{
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010030 struct rb_node **p = &list->rb_node;
31 struct rb_node * parent = NULL;
32 struct jffs2_tmp_dnode_info *this;
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010034 while (*p) {
35 parent = *p;
36 this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010038 /* There may actually be a collision here, but it doesn't
39 actually matter. As long as the two nodes with the same
40 version are together, it's all fine. */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010041 if (tn->version > this->version)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010042 p = &(*p)->rb_left;
43 else
44 p = &(*p)->rb_right;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010045 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010047 rb_link_node(&tn->rb, parent, p);
48 rb_insert_color(&tn->rb, list);
49}
50
51static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
52{
53 struct rb_node *this;
54 struct jffs2_tmp_dnode_info *tn;
55
56 this = list->rb_node;
57
58 /* Now at bottom of tree */
59 while (this) {
60 if (this->rb_left)
61 this = this->rb_left;
62 else if (this->rb_right)
63 this = this->rb_right;
64 else {
65 tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
66 jffs2_free_full_dnode(tn->fn);
67 jffs2_free_tmp_dnode_info(tn);
68
David Woodhouse21f1d5f2006-04-21 13:17:57 +010069 this = rb_parent(this);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010070 if (!this)
71 break;
72
73 if (this->rb_left == &tn->rb)
74 this->rb_left = NULL;
75 else if (this->rb_right == &tn->rb)
76 this->rb_right = NULL;
77 else BUG();
78 }
79 }
80 list->rb_node = NULL;
81}
82
83static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
84{
85 struct jffs2_full_dirent *next;
86
87 while (fd) {
88 next = fd->next;
89 jffs2_free_full_dirent(fd);
90 fd = next;
91 }
92}
93
94/* Returns first valid node after 'ref'. May return 'ref' */
95static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
96{
97 while (ref && ref->next_in_ino) {
98 if (!ref_obsolete(ref))
99 return ref;
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100100 dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100101 ref = ref->next_in_ino;
102 }
103 return NULL;
104}
105
106/*
107 * Helper function for jffs2_get_inode_nodes().
108 * It is called every time an directory entry node is found.
109 *
110 * Returns: 0 on succes;
111 * 1 if the node should be marked obsolete;
112 * negative error code on failure.
113 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100114static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
Atsushi Nemoto0ef675d2006-03-09 17:33:38 -0800115 struct jffs2_raw_dirent *rd, size_t read, struct jffs2_full_dirent **fdp,
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100116 uint32_t *latest_mctime, uint32_t *mctime_ver)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100117{
118 struct jffs2_full_dirent *fd;
David Woodhouse1046d882006-06-18 22:44:21 +0100119 uint32_t crc;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000120
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100121 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
122 BUG_ON(ref_obsolete(ref));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000123
David Woodhouse1046d882006-06-18 22:44:21 +0100124 crc = crc32(0, rd, sizeof(*rd) - 8);
125 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
126 JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
127 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100128 return 1;
129 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000130
David Woodhouse1046d882006-06-18 22:44:21 +0100131 /* If we've never checked the CRCs on this node, check them now */
132 if (ref_flags(ref) == REF_UNCHECKED) {
133 struct jffs2_eraseblock *jeb;
134 int len;
135
136 /* Sanity check */
137 if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
138 JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
139 ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
140 return 1;
141 }
142
143 jeb = &c->blocks[ref->flash_offset / c->sector_size];
144 len = ref_totlen(c, jeb, ref);
145
146 spin_lock(&c->erase_completion_lock);
147 jeb->used_size += len;
148 jeb->unchecked_size -= len;
149 c->used_size += len;
150 c->unchecked_size -= len;
151 ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
152 spin_unlock(&c->erase_completion_lock);
153 }
154
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100155 fd = jffs2_alloc_full_dirent(rd->nsize + 1);
156 if (unlikely(!fd))
157 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100159 fd->raw = ref;
160 fd->version = je32_to_cpu(rd->version);
161 fd->ino = je32_to_cpu(rd->ino);
162 fd->type = rd->type;
163
164 /* Pick out the mctime of the latest dirent */
Artem B. Bityutskiy3a69e0c2005-08-17 14:46:26 +0100165 if(fd->version > *mctime_ver && je32_to_cpu(rd->mctime)) {
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100166 *mctime_ver = fd->version;
167 *latest_mctime = je32_to_cpu(rd->mctime);
168 }
169
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000170 /*
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100171 * Copy as much of the name as possible from the raw
172 * dirent we've already read from the flash.
173 */
174 if (read > sizeof(*rd))
175 memcpy(&fd->name[0], &rd->name[0],
176 min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000177
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100178 /* Do we need to copy any more of the name directly from the flash? */
179 if (rd->nsize + sizeof(*rd) > read) {
180 /* FIXME: point() */
181 int err;
182 int already = read - sizeof(*rd);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000183
184 err = jffs2_flash_read(c, (ref_offset(ref)) + read,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100185 rd->nsize - already, &read, &fd->name[already]);
186 if (unlikely(read != rd->nsize - already) && likely(!err))
187 return -EIO;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000188
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100189 if (unlikely(err)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100190 JFFS2_ERROR("read remainder of name: error %d\n", err);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100191 jffs2_free_full_dirent(fd);
192 return -EIO;
193 }
194 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000195
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100196 fd->nhash = full_name_hash(fd->name, rd->nsize);
197 fd->next = NULL;
198 fd->name[rd->nsize] = '\0';
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000199
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100200 /*
201 * Wheee. We now have a complete jffs2_full_dirent structure, with
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000202 * the name in it and everything. Link it into the list
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100203 */
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100204 jffs2_add_fd_to_list(c, fd, fdp);
205
206 return 0;
207}
208
/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode (data) node is found.
 *
 * Returns: 0 on success;
 * 	    1 if the node should be marked obsolete;
 * 	    negative error code on failure.
 */
static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
			     struct jffs2_raw_inode *rd, struct rb_root *tnp, int rdlen,
			     uint32_t *latest_mctime, uint32_t *mctime_ver)
{
	struct jffs2_tmp_dnode_info *tn;
	uint32_t len, csize;
	int ret = 1;	/* default error path result: caller marks node obsolete */
	uint32_t crc;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	/* Header CRC covers the raw inode minus the last two CRC fields
	   (node_crc and data_crc) — hence '- 8'. */
	crc = crc32(0, rd, sizeof(*rd) - 8);
	if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
		JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
			     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
		return 1;
	}

	tn = jffs2_alloc_tmp_dnode_info();
	if (!tn) {
		JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
		return -ENOMEM;
	}

	tn->partial_crc = 0;
	csize = je32_to_cpu(rd->csize);

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {

		/* Sanity checks */
		if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
		    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
				JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
				jffs2_dbg_dump_node(c, ref_offset(ref));
			/* ret is still 1: caller will obsolete the node */
			goto free_out;
		}

		if (jffs2_is_writebuffered(c) && csize != 0) {
			/* At this point we are supposed to check the data CRC
			 * of our unchecked node. But thus far, we do not
			 * know whether the node is valid or obsolete. To
			 * figure this out, we need to walk all the nodes of
			 * the inode and build the inode fragtree. We don't
			 * want to spend time checking data of nodes which may
			 * later be found to be obsolete. So we put off the full
			 * data CRC checking until we have read all the inode
			 * nodes and have started building the fragtree.
			 *
			 * The fragtree is being built starting with nodes
			 * having the highest version number, so we'll be able
			 * to detect whether a node is valid (i.e., it is not
			 * overlapped by a node with higher version) or not.
			 * And we'll be able to check only those nodes, which
			 * are not obsolete.
			 *
			 * Of course, this optimization only makes sense in case
			 * of NAND flashes (or other flashes with
			 * !jffs2_can_mark_obsolete()), since on NOR flashes
			 * nodes are marked obsolete physically.
			 *
			 * Since NAND flashes (or other flashes with
			 * jffs2_is_writebuffered(c)) are anyway read by
			 * fractions of c->wbuf_pagesize, and we have just read
			 * the node header, it is likely that the starting part
			 * of the node data is also read when we read the
			 * header. So we don't mind to check the CRC of the
			 * starting part of the data of the node now, and check
			 * the second part later (in jffs2_check_node_data()).
			 * Of course, we will not need to re-read and re-check
			 * the NAND page which we have just read. This is why we
			 * read the whole NAND page at jffs2_get_inode_nodes(),
			 * while we needed only the node header.
			 */
			unsigned char *buf;

			/* 'buf' will point to the start of data */
			buf = (unsigned char *)rd + sizeof(*rd);
			/* len will be the read data length */
			len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
			tn->partial_crc = crc32(0, buf, len);

			dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);

			/* If we actually calculated the whole data CRC
			 * and it is wrong, drop the node. */
			if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
				JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
					ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
				goto free_out;
			}

		} else if (csize == 0) {
			/*
			 * We checked the header CRC. If the node has no data, adjust
			 * the space accounting now. For other nodes this will be done
			 * later either when the node is marked obsolete or when its
			 * data is checked.
			 */
			struct jffs2_eraseblock *jeb;

			dbg_readinode("the node has no data.\n");
			jeb = &c->blocks[ref->flash_offset / c->sector_size];
			len = ref_totlen(c, jeb, ref);

			/* Move the node's space from 'unchecked' to 'used'. */
			spin_lock(&c->erase_completion_lock);
			jeb->used_size += len;
			jeb->unchecked_size -= len;
			c->used_size += len;
			c->unchecked_size -= len;
			ref->flash_offset = ref_offset(ref) | REF_NORMAL;
			spin_unlock(&c->erase_completion_lock);
		}
	}

	tn->fn = jffs2_alloc_full_dnode();
	if (!tn->fn) {
		JFFS2_ERROR("alloc fn failed\n");
		ret = -ENOMEM;
		goto free_out;
	}

	tn->version = je32_to_cpu(rd->version);
	tn->fn->ofs = je32_to_cpu(rd->offset);
	tn->data_crc = je32_to_cpu(rd->data_crc);
	tn->csize = csize;
	tn->fn->raw = ref;

	/* There was a bug where we wrote hole nodes out with
	   csize/dsize swapped. Deal with it */
	if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
		tn->fn->size = csize;
	else // normal case...
		tn->fn->size = je32_to_cpu(rd->dsize);

	dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
		  ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);

	jffs2_add_tn_to_tree(tn, tnp);

	return 0;

free_out:
	jffs2_free_tmp_dnode_info_list(tn);
	return ret;
}
364
365/*
366 * Helper function for jffs2_get_inode_nodes().
367 * It is called every time an unknown node is found.
368 *
David Woodhouse3877f0b2006-06-18 00:05:26 +0100369 * Returns: 0 on success;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100370 * 1 if the node should be marked obsolete;
371 * negative error code on failure.
372 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100373static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100374{
375 /* We don't mark unknown nodes as REF_UNCHECKED */
David Woodhousec7258a42007-03-09 11:44:00 +0000376 if (ref_flags(ref) == REF_UNCHECKED) {
377 JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n",
378 ref_offset(ref));
379 JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n",
380 je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
381 je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc));
382 return 1;
383 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000384
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100385 un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
386
David Woodhouse3877f0b2006-06-18 00:05:26 +0100387 switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
388
389 case JFFS2_FEATURE_INCOMPAT:
390 JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
391 je16_to_cpu(un->nodetype), ref_offset(ref));
392 /* EEP */
393 BUG();
394 break;
395
396 case JFFS2_FEATURE_ROCOMPAT:
397 JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
398 je16_to_cpu(un->nodetype), ref_offset(ref));
399 BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
400 break;
401
402 case JFFS2_FEATURE_RWCOMPAT_COPY:
403 JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
404 je16_to_cpu(un->nodetype), ref_offset(ref));
405 break;
406
407 case JFFS2_FEATURE_RWCOMPAT_DELETE:
408 JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
409 je16_to_cpu(un->nodetype), ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100410 return 1;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100411 }
412
413 return 0;
414}
415
/*
 * Helper function for jffs2_get_inode_nodes().
 * The function detects whether more data should be read and reads it if yes.
 *
 * 'right_size' is the size the caller actually needs for this node type;
 * '*rdlen' is how much has been read into 'buf' so far, and is updated on
 * success.  'bufstart' is where the node begins within 'buf'.
 *
 * Returns: 0 on success;
 * 	    negative error code on failure.
 */
static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
		     int right_size, int *rdlen, unsigned char *buf, unsigned char *bufstart)
{
	int right_len, err, len;
	size_t retlen;
	uint32_t offs;

	if (jffs2_is_writebuffered(c)) {
		/* Round the target length up to whole wbuf pages: one page,
		   or two if the needed data spans the page boundary. */
		right_len = c->wbuf_pagesize - (bufstart - buf);
		if (right_size + (int)(bufstart - buf) > c->wbuf_pagesize)
			right_len += c->wbuf_pagesize;
	} else
		right_len = right_size;

	/* Already have everything we need? */
	if (*rdlen == right_len)
		return 0;

	/* We need to read more data */
	offs = ref_offset(ref) + *rdlen;
	if (jffs2_is_writebuffered(c)) {
		/* The second wbuf page goes into the second half of 'buf'. */
		bufstart = buf + c->wbuf_pagesize;
		len = c->wbuf_pagesize;
	} else {
		bufstart = buf + *rdlen;
		len = right_size - *rdlen;
	}

	dbg_readinode("read more %d bytes\n", len);

	err = jffs2_flash_read(c, offs, len, &retlen, bufstart);
	if (err) {
		JFFS2_ERROR("can not read %d bytes from 0x%08x, "
			"error code: %d.\n", len, offs, err);
		return err;
	}

	/* A short read is as fatal as an explicit error. */
	if (retlen < len) {
		JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
				offs, retlen, len);
		return -EIO;
	}

	*rdlen = right_len;

	return 0;
}
469
/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino, returning the former in order of version.

   On success the collected dnodes are handed back through 'tnp' and the
   dirents through 'fdp'; *highest_version, *latest_mctime and *mctime_ver
   are updated as a side effect.  On failure everything collected so far is
   freed and a negative error code is returned. */
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct rb_root *tnp, struct jffs2_full_dirent **fdp,
				 uint32_t *highest_version, uint32_t *latest_mctime,
				 uint32_t *mctime_ver)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	struct rb_root ret_tn = RB_ROOT;
	struct jffs2_full_dirent *ret_fd = NULL;
	unsigned char *buf = NULL;
	union jffs2_node_union *node;
	size_t retlen;
	int len, err;

	*mctime_ver = 0;

	dbg_readinode("ino #%u\n", f->inocache->ino);

	if (jffs2_is_writebuffered(c)) {
		/*
		 * If we have the write buffer, we assume the minimal I/O unit
		 * is c->wbuf_pagesize. We implement some optimizations which in
		 * this case and we need a temporary buffer of size =
		 * 2*c->wbuf_pagesize bytes (see comments in read_dnode()).
		 * Basically, we want to read not only the node header, but the
		 * whole wbuf (NAND page in case of NAND) or 2, if the node
		 * header overlaps the border between the 2 wbufs.
		 */
		len = 2*c->wbuf_pagesize;
	} else {
		/*
		 * When there is no write buffer, the size of the temporary
		 * buffer is the size of the largest node header.
		 */
		len = sizeof(union jffs2_node_union);
	}

	/* FIXME: in case of NOR and available ->point() this
	 * needs to be fixed. */
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(&c->erase_completion_lock);
	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
	if (!valid_ref && f->inocache->ino != 1)
		JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
	while (valid_ref) {
		unsigned char *bufstart;

		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		 */
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/*
		 * At this point we don't know the type of the node we're going
		 * to read, so we do not know the size of its header. In order
		 * to minimize the amount of flash IO we assume the node has
		 * size = JFFS2_MIN_NODE_HEADER.
		 */
		if (jffs2_is_writebuffered(c)) {
			/*
			 * We treat 'buf' as 2 adjacent wbufs. We want to
			 * adjust bufstart such as it points to the
			 * beginning of the node within this wbuf.
			 */
			bufstart = buf + (ref_offset(ref) % c->wbuf_pagesize);
			/* We will read either one wbuf or 2 wbufs. */
			len = c->wbuf_pagesize - (bufstart - buf);
			if (JFFS2_MIN_NODE_HEADER + (int)(bufstart - buf) > c->wbuf_pagesize) {
				/* The header spans the border of the first wbuf */
				len += c->wbuf_pagesize;
			}
		} else {
			bufstart = buf;
			len = JFFS2_MIN_NODE_HEADER;
		}

		dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len,
				       &retlen, bufstart);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)bufstart;

		/* No need to mask in the valid bit; it shouldn't be invalid */
		if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
			JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
				     ref_offset(ref), je16_to_cpu(node->u.magic),
				     je16_to_cpu(node->u.nodetype),
				     je32_to_cpu(node->u.totlen),
				     je32_to_cpu(node->u.hdr_crc));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}
		/* Due to poor choice of crc32 seed, an all-zero node will have a correct CRC */
		if (!je32_to_cpu(node->u.hdr_crc) && !je16_to_cpu(node->u.nodetype) &&
		    !je16_to_cpu(node->u.magic) && !je32_to_cpu(node->u.totlen)) {
			JFFS2_NOTICE("All zero node header at %#08x.\n", ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}

		/* Dispatch on node type; read_more() first fetches the rest of
		   the type-specific header if the minimal read was too short. */
		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->d.version) > *highest_version)
				*highest_version = je32_to_cpu(node->d.version);

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->i.version) > *highest_version)
				*highest_version = je32_to_cpu(node->i.version);

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

		}
	cont:
		/* Re-take the lock before looking at the next ref. */
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	*tnp = ret_tn;
	*fdp = ret_fd;
	kfree(buf);

	dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
		      f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver);
	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(&ret_tn);
	jffs2_free_full_dirent_list(ret_fd);
	kfree(buf);
	return err;
}
670
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000671static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700672 struct jffs2_inode_info *f,
673 struct jffs2_raw_inode *latest_node)
674{
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100675 struct jffs2_tmp_dnode_info *tn;
David Woodhouse9dee7502005-07-05 22:03:10 +0100676 struct rb_root tn_list;
677 struct rb_node *rb, *repl_rb;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678 struct jffs2_full_dirent *fd_list;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100679 struct jffs2_full_dnode *fn, *first_fn = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700680 uint32_t crc;
681 uint32_t latest_mctime, mctime_ver;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682 size_t retlen;
683 int ret;
684
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100685 dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700686
687 /* Grab all nodes relevant to this ino */
688 ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);
689
690 if (ret) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100691 JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692 if (f->inocache->state == INO_STATE_READING)
693 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
694 return ret;
695 }
696 f->dents = fd_list;
697
David Woodhouse9dee7502005-07-05 22:03:10 +0100698 rb = rb_first(&tn_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700699
David Woodhouse9dee7502005-07-05 22:03:10 +0100700 while (rb) {
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100701 cond_resched();
David Woodhouse9dee7502005-07-05 22:03:10 +0100702 tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 fn = tn->fn;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100704 ret = 1;
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100705 dbg_readinode("consider node ver %u, phys offset "
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100706 "%#08x(%d), range %u-%u.\n", tn->version,
707 ref_offset(fn->raw), ref_flags(fn->raw),
708 fn->ofs, fn->ofs + fn->size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709
710 if (fn->size) {
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100711 ret = jffs2_add_older_frag_to_fragtree(c, f, tn);
712 /* TODO: the error code isn't checked, check it */
713 jffs2_dbg_fragtree_paranoia_check_nolock(f);
714 BUG_ON(ret < 0);
715 if (!first_fn && ret == 0)
716 first_fn = fn;
717 } else if (!first_fn) {
718 first_fn = fn;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700719 f->metadata = fn;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100720 ret = 0; /* Prevent freeing the metadata update node */
721 } else
722 jffs2_mark_node_obsolete(c, fn->raw);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000723
David Woodhouse9dee7502005-07-05 22:03:10 +0100724 BUG_ON(rb->rb_left);
David Woodhouse21f1d5f2006-04-21 13:17:57 +0100725 if (rb_parent(rb) && rb_parent(rb)->rb_left == rb) {
David Woodhouse9dee7502005-07-05 22:03:10 +0100726 /* We were then left-hand child of our parent. We need
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100727 * to move our own right-hand child into our place. */
David Woodhouse9dee7502005-07-05 22:03:10 +0100728 repl_rb = rb->rb_right;
729 if (repl_rb)
David Woodhouse21f1d5f2006-04-21 13:17:57 +0100730 rb_set_parent(repl_rb, rb_parent(rb));
David Woodhouse9dee7502005-07-05 22:03:10 +0100731 } else
732 repl_rb = NULL;
733
734 rb = rb_next(rb);
735
736 /* Remove the spent tn from the tree; don't bother rebalancing
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100737 * but put our right-hand child in our own place. */
David Woodhouse21f1d5f2006-04-21 13:17:57 +0100738 if (rb_parent(&tn->rb)) {
739 if (rb_parent(&tn->rb)->rb_left == &tn->rb)
740 rb_parent(&tn->rb)->rb_left = repl_rb;
741 else if (rb_parent(&tn->rb)->rb_right == &tn->rb)
742 rb_parent(&tn->rb)->rb_right = repl_rb;
David Woodhouse9dee7502005-07-05 22:03:10 +0100743 else BUG();
744 } else if (tn->rb.rb_right)
David Woodhouse21f1d5f2006-04-21 13:17:57 +0100745 rb_set_parent(tn->rb.rb_right, NULL);
David Woodhouse9dee7502005-07-05 22:03:10 +0100746
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747 jffs2_free_tmp_dnode_info(tn);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100748 if (ret) {
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100749 dbg_readinode("delete dnode %u-%u.\n",
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100750 fn->ofs, fn->ofs + fn->size);
751 jffs2_free_full_dnode(fn);
752 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700753 }
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +0100754 jffs2_dbg_fragtree_paranoia_check_nolock(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100756 BUG_ON(first_fn && ref_obsolete(first_fn->raw));
757
758 fn = first_fn;
759 if (unlikely(!first_fn)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700760 /* No data nodes for this inode. */
761 if (f->inocache->ino != 1) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100762 JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700763 if (!fd_list) {
764 if (f->inocache->state == INO_STATE_READING)
765 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
766 return -EIO;
767 }
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100768 JFFS2_NOTICE("but it has children so we fake some modes for it\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700769 }
770 latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
771 latest_node->version = cpu_to_je32(0);
772 latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
773 latest_node->isize = cpu_to_je32(0);
774 latest_node->gid = cpu_to_je16(0);
775 latest_node->uid = cpu_to_je16(0);
776 if (f->inocache->state == INO_STATE_READING)
777 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
778 return 0;
779 }
780
781 ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
782 if (ret || retlen != sizeof(*latest_node)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100783 JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
784 ret, retlen, sizeof(*latest_node));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785 /* FIXME: If this fails, there seems to be a memory leak. Find it. */
786 up(&f->sem);
787 jffs2_do_clear_inode(c, f);
788 return ret?ret:-EIO;
789 }
790
791 crc = crc32(0, latest_node, sizeof(*latest_node)-8);
792 if (crc != je32_to_cpu(latest_node->node_crc)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100793 JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
794 f->inocache->ino, ref_offset(fn->raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795 up(&f->sem);
796 jffs2_do_clear_inode(c, f);
797 return -EIO;
798 }
799
800 switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
801 case S_IFDIR:
802 if (mctime_ver > je32_to_cpu(latest_node->version)) {
803 /* The times in the latest_node are actually older than
804 mctime in the latest dirent. Cheat. */
805 latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
806 }
807 break;
808
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000809
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810 case S_IFREG:
811 /* If it was a regular file, truncate it to the latest node's isize */
Artem B. Bityutskiyf302cd02005-07-24 16:29:59 +0100812 jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813 break;
814
815 case S_IFLNK:
816 /* Hack to work around broken isize in old symlink code.
817 Remove this when dwmw2 comes to his senses and stops
818 symlinks from being an entirely gratuitous special
819 case. */
820 if (!je32_to_cpu(latest_node->isize))
821 latest_node->isize = latest_node->dsize;
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000822
823 if (f->inocache->state != INO_STATE_CHECKING) {
824 /* Symlink's inode data is the target path. Read it and
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100825 * keep in RAM to facilitate quick follow symlink
826 * operation. */
827 f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
828 if (!f->target) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100829 JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000830 up(&f->sem);
831 jffs2_do_clear_inode(c, f);
832 return -ENOMEM;
833 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000834
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000835 ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node),
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100836 je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000837
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000838 if (ret || retlen != je32_to_cpu(latest_node->csize)) {
839 if (retlen != je32_to_cpu(latest_node->csize))
840 ret = -EIO;
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100841 kfree(f->target);
842 f->target = NULL;
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000843 up(&f->sem);
844 jffs2_do_clear_inode(c, f);
845 return -ret;
846 }
847
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100848 f->target[je32_to_cpu(latest_node->csize)] = '\0';
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100849 dbg_readinode("symlink's target '%s' cached\n", f->target);
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000850 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000851
Linus Torvalds1da177e2005-04-16 15:20:36 -0700852 /* fall through... */
853
854 case S_IFBLK:
855 case S_IFCHR:
856 /* Certain inode types should have only one data node, and it's
857 kept as the metadata node */
858 if (f->metadata) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100859 JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860 f->inocache->ino, jemode_to_cpu(latest_node->mode));
861 up(&f->sem);
862 jffs2_do_clear_inode(c, f);
863 return -EIO;
864 }
865 if (!frag_first(&f->fragtree)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100866 JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867 f->inocache->ino, jemode_to_cpu(latest_node->mode));
868 up(&f->sem);
869 jffs2_do_clear_inode(c, f);
870 return -EIO;
871 }
872 /* ASSERT: f->fraglist != NULL */
873 if (frag_next(frag_first(&f->fragtree))) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100874 JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700875 f->inocache->ino, jemode_to_cpu(latest_node->mode));
876 /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
877 up(&f->sem);
878 jffs2_do_clear_inode(c, f);
879 return -EIO;
880 }
881 /* OK. We're happy */
882 f->metadata = frag_first(&f->fragtree)->node;
883 jffs2_free_node_frag(frag_first(&f->fragtree));
884 f->fragtree = RB_ROOT;
885 break;
886 }
887 if (f->inocache->state == INO_STATE_READING)
888 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
889
890 return 0;
891}
892
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100893/* Scan the list of all nodes present for this ino, build map of versions, etc. */
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000894int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100895 uint32_t ino, struct jffs2_raw_inode *latest_node)
896{
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100897 dbg_readinode("read inode #%u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100898
899 retry_inocache:
900 spin_lock(&c->inocache_lock);
901 f->inocache = jffs2_get_ino_cache(c, ino);
902
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100903 if (f->inocache) {
904 /* Check its state. We may need to wait before we can use it */
905 switch(f->inocache->state) {
906 case INO_STATE_UNCHECKED:
907 case INO_STATE_CHECKEDABSENT:
908 f->inocache->state = INO_STATE_READING;
909 break;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000910
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100911 case INO_STATE_CHECKING:
912 case INO_STATE_GC:
913 /* If it's in either of these states, we need
914 to wait for whoever's got it to finish and
915 put it back. */
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100916 dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100917 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
918 goto retry_inocache;
919
920 case INO_STATE_READING:
921 case INO_STATE_PRESENT:
922 /* Eep. This should never happen. It can
923 happen if Linux calls read_inode() again
924 before clear_inode() has finished though. */
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100925 JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100926 /* Fail. That's probably better than allowing it to succeed */
927 f->inocache = NULL;
928 break;
929
930 default:
931 BUG();
932 }
933 }
934 spin_unlock(&c->inocache_lock);
935
936 if (!f->inocache && ino == 1) {
937 /* Special case - no root inode on medium */
938 f->inocache = jffs2_alloc_inode_cache();
939 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100940 JFFS2_ERROR("cannot allocate inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100941 return -ENOMEM;
942 }
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100943 dbg_readinode("creating inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100944 memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
945 f->inocache->ino = f->inocache->nlink = 1;
946 f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
947 f->inocache->state = INO_STATE_READING;
948 jffs2_add_ino_cache(c, f->inocache);
949 }
950 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100951 JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100952 return -ENOENT;
953 }
954
955 return jffs2_do_read_inode_internal(c, f, latest_node);
956}
957
958int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
959{
960 struct jffs2_raw_inode n;
Yan Burman3d375d92006-12-04 15:03:01 -0800961 struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100962 int ret;
963
964 if (!f)
965 return -ENOMEM;
966
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100967 init_MUTEX_LOCKED(&f->sem);
968 f->inocache = ic;
969
970 ret = jffs2_do_read_inode_internal(c, f, &n);
971 if (!ret) {
972 up(&f->sem);
973 jffs2_do_clear_inode(c, f);
974 }
975 kfree (f);
976 return ret;
977}
978
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
980{
981 struct jffs2_full_dirent *fd, *fds;
982 int deleted;
983
KaiGai Koheic7afb0f2006-07-02 15:13:46 +0100984 jffs2_clear_acl(f);
KaiGai Kohei355ed4e2006-06-24 09:15:36 +0900985 jffs2_xattr_delete_inode(c, f->inocache);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 down(&f->sem);
987 deleted = f->inocache && !f->inocache->nlink;
988
David Woodhouse67e345d2005-02-27 23:01:36 +0000989 if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
990 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);
991
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992 if (f->metadata) {
993 if (deleted)
994 jffs2_mark_node_obsolete(c, f->metadata->raw);
995 jffs2_free_full_dnode(f->metadata);
996 }
997
998 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
999
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001000 if (f->target) {
1001 kfree(f->target);
1002 f->target = NULL;
1003 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +00001004
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +01001005 fds = f->dents;
1006 while(fds) {
1007 fd = fds;
1008 fds = fd->next;
1009 jffs2_free_full_dirent(fd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001010 }
1011
David Woodhouse67e345d2005-02-27 23:01:36 +00001012 if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001013 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
David Woodhouse67e345d2005-02-27 23:01:36 +00001014 if (f->inocache->nodes == (void *)f->inocache)
1015 jffs2_del_ino_cache(c, f->inocache);
1016 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017
1018 up(&f->sem);
1019}