blob: 1298848336b8139e099eb51b757e34b610e9835d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2001-2003 Red Hat, Inc.
5 *
6 * Created by David Woodhouse <dwmw2@infradead.org>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
Thomas Gleixner182ec4e2005-11-07 11:16:07 +000010 * $Id: readinode.c,v 1.143 2005/11/07 11:14:41 gleixner Exp $
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 *
12 */
13
14#include <linux/kernel.h>
Andrew Lunn737b7662005-07-30 16:29:30 +010015#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/slab.h>
17#include <linux/fs.h>
18#include <linux/crc32.h>
19#include <linux/pagemap.h>
20#include <linux/mtd/mtd.h>
21#include <linux/compiler.h>
22#include "nodelist.h"
23
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010024/*
25 * Put a new tmp_dnode_info into the temporaty RB-tree, keeping the list in
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010026 * order of increasing version.
27 */
28static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -070029{
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010030 struct rb_node **p = &list->rb_node;
31 struct rb_node * parent = NULL;
32 struct jffs2_tmp_dnode_info *this;
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010034 while (*p) {
35 parent = *p;
36 this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010038 /* There may actually be a collision here, but it doesn't
39 actually matter. As long as the two nodes with the same
40 version are together, it's all fine. */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010041 if (tn->version > this->version)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010042 p = &(*p)->rb_left;
43 else
44 p = &(*p)->rb_right;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010045 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010047 rb_link_node(&tn->rb, parent, p);
48 rb_insert_color(&tn->rb, list);
49}
50
51static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
52{
53 struct rb_node *this;
54 struct jffs2_tmp_dnode_info *tn;
55
56 this = list->rb_node;
57
58 /* Now at bottom of tree */
59 while (this) {
60 if (this->rb_left)
61 this = this->rb_left;
62 else if (this->rb_right)
63 this = this->rb_right;
64 else {
65 tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
66 jffs2_free_full_dnode(tn->fn);
67 jffs2_free_tmp_dnode_info(tn);
68
David Woodhouse21f1d5f2006-04-21 13:17:57 +010069 this = rb_parent(this);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010070 if (!this)
71 break;
72
73 if (this->rb_left == &tn->rb)
74 this->rb_left = NULL;
75 else if (this->rb_right == &tn->rb)
76 this->rb_right = NULL;
77 else BUG();
78 }
79 }
80 list->rb_node = NULL;
81}
82
83static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
84{
85 struct jffs2_full_dirent *next;
86
87 while (fd) {
88 next = fd->next;
89 jffs2_free_full_dirent(fd);
90 fd = next;
91 }
92}
93
94/* Returns first valid node after 'ref'. May return 'ref' */
95static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
96{
97 while (ref && ref->next_in_ino) {
98 if (!ref_obsolete(ref))
99 return ref;
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100100 dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100101 ref = ref->next_in_ino;
102 }
103 return NULL;
104}
105
106/*
107 * Helper function for jffs2_get_inode_nodes().
108 * It is called every time an directory entry node is found.
109 *
110 * Returns: 0 on succes;
111 * 1 if the node should be marked obsolete;
112 * negative error code on failure.
113 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100114static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
Atsushi Nemoto0ef675d2006-03-09 17:33:38 -0800115 struct jffs2_raw_dirent *rd, size_t read, struct jffs2_full_dirent **fdp,
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100116 uint32_t *latest_mctime, uint32_t *mctime_ver)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100117{
118 struct jffs2_full_dirent *fd;
David Woodhouse1046d882006-06-18 22:44:21 +0100119 uint32_t crc;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000120
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100121 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
122 BUG_ON(ref_obsolete(ref));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000123
David Woodhouse1046d882006-06-18 22:44:21 +0100124 crc = crc32(0, rd, sizeof(*rd) - 8);
125 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
126 JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
127 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100128 return 1;
129 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000130
David Woodhouse1046d882006-06-18 22:44:21 +0100131 /* If we've never checked the CRCs on this node, check them now */
132 if (ref_flags(ref) == REF_UNCHECKED) {
133 struct jffs2_eraseblock *jeb;
134 int len;
135
136 /* Sanity check */
137 if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
138 JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
139 ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
140 return 1;
141 }
142
143 jeb = &c->blocks[ref->flash_offset / c->sector_size];
144 len = ref_totlen(c, jeb, ref);
145
146 spin_lock(&c->erase_completion_lock);
147 jeb->used_size += len;
148 jeb->unchecked_size -= len;
149 c->used_size += len;
150 c->unchecked_size -= len;
151 ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
152 spin_unlock(&c->erase_completion_lock);
153 }
154
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100155 fd = jffs2_alloc_full_dirent(rd->nsize + 1);
156 if (unlikely(!fd))
157 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100159 fd->raw = ref;
160 fd->version = je32_to_cpu(rd->version);
161 fd->ino = je32_to_cpu(rd->ino);
162 fd->type = rd->type;
163
164 /* Pick out the mctime of the latest dirent */
Artem B. Bityutskiy3a69e0c2005-08-17 14:46:26 +0100165 if(fd->version > *mctime_ver && je32_to_cpu(rd->mctime)) {
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100166 *mctime_ver = fd->version;
167 *latest_mctime = je32_to_cpu(rd->mctime);
168 }
169
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000170 /*
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100171 * Copy as much of the name as possible from the raw
172 * dirent we've already read from the flash.
173 */
174 if (read > sizeof(*rd))
175 memcpy(&fd->name[0], &rd->name[0],
176 min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000177
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100178 /* Do we need to copy any more of the name directly from the flash? */
179 if (rd->nsize + sizeof(*rd) > read) {
180 /* FIXME: point() */
181 int err;
182 int already = read - sizeof(*rd);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000183
184 err = jffs2_flash_read(c, (ref_offset(ref)) + read,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100185 rd->nsize - already, &read, &fd->name[already]);
186 if (unlikely(read != rd->nsize - already) && likely(!err))
187 return -EIO;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000188
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100189 if (unlikely(err)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100190 JFFS2_ERROR("read remainder of name: error %d\n", err);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100191 jffs2_free_full_dirent(fd);
192 return -EIO;
193 }
194 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000195
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100196 fd->nhash = full_name_hash(fd->name, rd->nsize);
197 fd->next = NULL;
198 fd->name[rd->nsize] = '\0';
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000199
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100200 /*
201 * Wheee. We now have a complete jffs2_full_dirent structure, with
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000202 * the name in it and everything. Link it into the list
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100203 */
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100204 jffs2_add_fd_to_list(c, fd, fdp);
205
206 return 0;
207}
208
/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode (data) node is found.
 *
 * Builds a jffs2_tmp_dnode_info for the node and inserts it into the
 * temporary RB-tree 'tnp' (ordered by version).  For REF_UNCHECKED nodes
 * it performs header sanity checks and either a partial data-CRC check
 * (write-buffered flash) or, for empty nodes, immediate space accounting.
 *
 * Returns: 0 on succes;
 * 	    1 if the node should be marked obsolete;
 * 	    negative error code on failure.
 */
static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
			     struct jffs2_raw_inode *rd, struct rb_root *tnp, int rdlen,
			     uint32_t *latest_mctime, uint32_t *mctime_ver)
{
	struct jffs2_tmp_dnode_info *tn;
	uint32_t len, csize;
	int ret = 1;	/* default: caller marks the node obsolete */
	uint32_t crc;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	/* Header CRC covers the raw inode minus the two trailing CRC fields */
	crc = crc32(0, rd, sizeof(*rd) - 8);
	if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
		JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
			     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
		return 1;
	}

	tn = jffs2_alloc_tmp_dnode_info();
	if (!tn) {
		JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
		return -ENOMEM;
	}

	/* partial_crc == 0 means "data CRC not (even partially) computed" */
	tn->partial_crc = 0;
	csize = je32_to_cpu(rd->csize);

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {

		/* Sanity checks */
		if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
		    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			goto free_out;
		}

		if (jffs2_is_writebuffered(c) && csize != 0) {
			/* At this point we are supposed to check the data CRC
			 * of our unchecked node. But thus far, we do not
			 * know whether the node is valid or obsolete. To
			 * figure this out, we need to walk all the nodes of
			 * the inode and build the inode fragtree. We don't
			 * want to spend time checking data of nodes which may
			 * later be found to be obsolete. So we put off the full
			 * data CRC checking until we have read all the inode
			 * nodes and have started building the fragtree.
			 *
			 * The fragtree is being built starting with nodes
			 * having the highest version number, so we'll be able
			 * to detect whether a node is valid (i.e., it is not
			 * overlapped by a node with higher version) or not.
			 * And we'll be able to check only those nodes, which
			 * are not obsolete.
			 *
			 * Of course, this optimization only makes sense in case
			 * of NAND flashes (or other flashes whith
			 * !jffs2_can_mark_obsolete()), since on NOR flashes
			 * nodes are marked obsolete physically.
			 *
			 * Since NAND flashes (or other flashes with
			 * jffs2_is_writebuffered(c)) are anyway read by
			 * fractions of c->wbuf_pagesize, and we have just read
			 * the node header, it is likely that the starting part
			 * of the node data is also read when we read the
			 * header. So we don't mind to check the CRC of the
			 * starting part of the data of the node now, and check
			 * the second part later (in jffs2_check_node_data()).
			 * Of course, we will not need to re-read and re-check
			 * the NAND page which we have just read. This is why we
			 * read the whole NAND page at jffs2_get_inode_nodes(),
			 * while we needed only the node header.
			 */
			unsigned char *buf;

			/* 'buf' will point to the start of data */
			buf = (unsigned char *)rd + sizeof(*rd);
			/* len will be the read data length */
			len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
			tn->partial_crc = crc32(0, buf, len);

			dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);

			/* If we actually calculated the whole data CRC
			 * and it is wrong, drop the node. */
			if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
				JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
					ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
				goto free_out;
			}

		} else if (csize == 0) {
			/*
			 * We checked the header CRC. If the node has no data, adjust
			 * the space accounting now. For other nodes this will be done
			 * later either when the node is marked obsolete or when its
			 * data is checked.
			 */
			struct jffs2_eraseblock *jeb;

			dbg_readinode("the node has no data.\n");
			jeb = &c->blocks[ref->flash_offset / c->sector_size];
			len = ref_totlen(c, jeb, ref);

			spin_lock(&c->erase_completion_lock);
			jeb->used_size += len;
			jeb->unchecked_size -= len;
			c->used_size += len;
			c->unchecked_size -= len;
			/* REF_NORMAL (not PRISTINE): the node is valid but
			   may not be perfectly aligned/complete for GC copy */
			ref->flash_offset = ref_offset(ref) | REF_NORMAL;
			spin_unlock(&c->erase_completion_lock);
		}
	}

	tn->fn = jffs2_alloc_full_dnode();
	if (!tn->fn) {
		JFFS2_ERROR("alloc fn failed\n");
		ret = -ENOMEM;
		goto free_out;
	}

	tn->version = je32_to_cpu(rd->version);
	tn->fn->ofs = je32_to_cpu(rd->offset);
	/* Stash data CRC and csize for the deferred check mentioned above */
	tn->data_crc = je32_to_cpu(rd->data_crc);
	tn->csize = csize;
	tn->fn->raw = ref;

	/* There was a bug where we wrote hole nodes out with
	   csize/dsize swapped. Deal with it */
	if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
		tn->fn->size = csize;
	else // normal case...
		tn->fn->size = je32_to_cpu(rd->dsize);

	dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
		  ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);

	jffs2_add_tn_to_tree(tn, tnp);

	return 0;

free_out:
	jffs2_free_tmp_dnode_info(tn);
	return ret;
}
364
365/*
366 * Helper function for jffs2_get_inode_nodes().
367 * It is called every time an unknown node is found.
368 *
David Woodhouse3877f0b2006-06-18 00:05:26 +0100369 * Returns: 0 on success;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100370 * 1 if the node should be marked obsolete;
371 * negative error code on failure.
372 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100373static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100374{
375 /* We don't mark unknown nodes as REF_UNCHECKED */
David Woodhousec7258a42007-03-09 11:44:00 +0000376 if (ref_flags(ref) == REF_UNCHECKED) {
377 JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n",
378 ref_offset(ref));
379 JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n",
380 je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
381 je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc));
382 return 1;
383 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000384
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100385 un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
386
David Woodhouse3877f0b2006-06-18 00:05:26 +0100387 switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
388
389 case JFFS2_FEATURE_INCOMPAT:
390 JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
391 je16_to_cpu(un->nodetype), ref_offset(ref));
392 /* EEP */
393 BUG();
394 break;
395
396 case JFFS2_FEATURE_ROCOMPAT:
397 JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
398 je16_to_cpu(un->nodetype), ref_offset(ref));
399 BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
400 break;
401
402 case JFFS2_FEATURE_RWCOMPAT_COPY:
403 JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
404 je16_to_cpu(un->nodetype), ref_offset(ref));
405 break;
406
407 case JFFS2_FEATURE_RWCOMPAT_DELETE:
408 JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
409 je16_to_cpu(un->nodetype), ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100410 return 1;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100411 }
412
413 return 0;
414}
415
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100416/*
417 * Helper function for jffs2_get_inode_nodes().
418 * The function detects whether more data should be read and reads it if yes.
419 *
420 * Returns: 0 on succes;
421 * negative error code on failure.
422 */
423static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300424 int needed_len, int *rdlen, unsigned char *buf)
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100425{
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300426 int err, to_read = needed_len - *rdlen;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100427 size_t retlen;
428 uint32_t offs;
429
430 if (jffs2_is_writebuffered(c)) {
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300431 int rem = to_read % c->wbuf_pagesize;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100432
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300433 if (rem)
434 to_read += c->wbuf_pagesize - rem;
435 }
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100436
437 /* We need to read more data */
438 offs = ref_offset(ref) + *rdlen;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000439
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300440 dbg_readinode("read more %d bytes\n", to_read);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100441
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300442 err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100443 if (err) {
444 JFFS2_ERROR("can not read %d bytes from 0x%08x, "
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300445 "error code: %d.\n", to_read, offs, err);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100446 return err;
447 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000448
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300449 if (retlen < to_read) {
Randy Dunlapfb6a82c2006-04-11 20:12:10 -0400450 JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300451 offs, retlen, to_read);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100452 return -EIO;
453 }
454
Artem Bityutskiy10731f82007-04-04 13:59:11 +0300455 *rdlen += to_read;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100456 return 0;
457}
458
/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino, returning the former in order of version.

   Walks f->inocache->nodes under c->erase_completion_lock, dropping the
   lock while each node is read from flash and re-taking it at 'cont'.
   On success the collected trees/lists are handed back via *tnp and *fdp;
   on failure everything collected so far is freed.

   Returns 0 on success, negative error code on failure. */
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct rb_root *tnp, struct jffs2_full_dirent **fdp,
				 uint32_t *highest_version, uint32_t *latest_mctime,
				 uint32_t *mctime_ver)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	struct rb_root ret_tn = RB_ROOT;
	struct jffs2_full_dirent *ret_fd = NULL;
	unsigned char *buf = NULL;
	union jffs2_node_union *node;
	size_t retlen;
	int len, err;

	*mctime_ver = 0;

	dbg_readinode("ino #%u\n", f->inocache->ino);

	/* FIXME: in case of NOR and available ->point() this
	 * needs to be fixed. */
	/* Buffer big enough for the largest node header plus one extra
	   write-buffer page of rounding slack. */
	len = sizeof(union jffs2_node_union) + c->wbuf_pagesize;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(&c->erase_completion_lock);
	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
	/* ino #1 (the root) legitimately may have no nodes yet */
	if (!valid_ref && f->inocache->ino != 1)
		JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
	while (valid_ref) {
		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/*
		 * At this point we don't know the type of the node we're going
		 * to read, so we do not know the size of its header. In order
		 * to minimize the amount of flash IO we assume the header is
		 * of size = JFFS2_MIN_NODE_HEADER.
		 */
		len = JFFS2_MIN_NODE_HEADER;
		if (jffs2_is_writebuffered(c)) {
			int end, rem;

			/*
			 * We are about to read JFFS2_MIN_NODE_HEADER bytes,
			 * but this flash has some minimal I/O unit. It is
			 * possible that we'll need to read more soon, so read
			 * up to the next min. I/O unit, in order not to
			 * re-read the same min. I/O unit twice.
			 */
			end = ref_offset(ref) + len;
			rem = end % c->wbuf_pagesize;
			if (rem)
				end += c->wbuf_pagesize - rem;
			len = end - ref_offset(ref);
		}

		dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)buf;

		/* No need to mask in the valid bit; it shouldn't be invalid */
		if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
			JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
				     ref_offset(ref), je16_to_cpu(node->u.magic),
				     je16_to_cpu(node->u.nodetype),
				     je32_to_cpu(node->u.totlen),
				     je32_to_cpu(node->u.hdr_crc));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}
		/* Due to poor choice of crc32 seed, an all-zero node will have a correct CRC */
		if (!je32_to_cpu(node->u.hdr_crc) && !je16_to_cpu(node->u.nodetype) &&
		    !je16_to_cpu(node->u.magic) && !je32_to_cpu(node->u.totlen)) {
			JFFS2_NOTICE("All zero node header at %#08x.\n", ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}

		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			/* If the minimal header we read is smaller than a raw
			   dirent, top the buffer up before parsing it. */
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver);
			if (err == 1) {
				/* Helper asked us to obsolete the node */
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->d.version) > *highest_version)
				*highest_version = je32_to_cpu(node->d.version);

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->i.version) > *highest_version)
				*highest_version = je32_to_cpu(node->i.version);

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

		}
	cont:
		/* Re-take the lock before looking at the next raw node ref */
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	*tnp = ret_tn;
	*fdp = ret_fd;
	kfree(buf);

	dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
		      f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver);
	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(&ret_tn);
	jffs2_free_full_dirent_list(ret_fd);
	kfree(buf);
	return err;
}
638
/*
 * Core of read_inode: collect every node belonging to this inode, build the
 * fragment tree, read and validate the most recent raw inode, and apply the
 * per-filetype fixups (dir mtimes, regular-file truncation, symlink target
 * caching, special-inode metadata).  Called with f->sem held; on failure it
 * releases f->sem and clears the inode itself before returning a -errno.
 */
static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
					struct jffs2_inode_info *f,
					struct jffs2_raw_inode *latest_node)
{
	struct jffs2_tmp_dnode_info *tn;
	struct rb_root tn_list;
	struct rb_node *rb, *repl_rb;
	struct jffs2_full_dirent *fd_list;
	struct jffs2_full_dnode *fn, *first_fn = NULL;
	uint32_t crc;
	uint32_t latest_mctime, mctime_ver;
	size_t retlen;
	int ret;

	dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);

	/* Grab all nodes relevant to this ino */
	ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);

	if (ret) {
		JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		return ret;
	}
	f->dents = fd_list;

	/* tn_list is ordered by increasing version; consume it in-order,
	 * destructively.  Each iteration detaches the minimum node from the
	 * tree without rebalancing (see comments below). */
	rb = rb_first(&tn_list);

	while (rb) {
		cond_resched();
		tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb);
		fn = tn->fn;
		/* ret doubles as a "free fn afterwards" flag: 1 = obsolete
		 * node to be freed, 0 = fn was kept (in fragtree or as
		 * metadata). */
		ret = 1;
		dbg_readinode("consider node ver %u, phys offset "
			"%#08x(%d), range %u-%u.\n", tn->version,
			ref_offset(fn->raw), ref_flags(fn->raw),
			fn->ofs, fn->ofs + fn->size);

		if (fn->size) {
			ret = jffs2_add_older_frag_to_fragtree(c, f, tn);
			/* TODO: the error code isn't checked, check it */
			jffs2_dbg_fragtree_paranoia_check_nolock(f);
			BUG_ON(ret < 0);
			if (!first_fn && ret == 0)
				first_fn = fn;
		} else if (!first_fn) {
			/* Zero-size node and nothing kept yet: treat it as the
			 * metadata-only node for this inode. */
			first_fn = fn;
			f->metadata = fn;
			ret = 0; /* Prevent freeing the metadata update node */
		} else
			jffs2_mark_node_obsolete(c, fn->raw);

		/* We always remove the leftmost (minimum) node, so it can
		 * have no left child. */
		BUG_ON(rb->rb_left);
		if (rb_parent(rb) && rb_parent(rb)->rb_left == rb) {
			/* We were then left-hand child of our parent. We need
			 * to move our own right-hand child into our place. */
			repl_rb = rb->rb_right;
			if (repl_rb)
				rb_set_parent(repl_rb, rb_parent(rb));
		} else
			repl_rb = NULL;

		/* Advance before unlinking tn - rb_next() needs tn still in
		 * the tree. */
		rb = rb_next(rb);

		/* Remove the spent tn from the tree; don't bother rebalancing
		 * but put our right-hand child in our own place. */
		if (rb_parent(&tn->rb)) {
			if (rb_parent(&tn->rb)->rb_left == &tn->rb)
				rb_parent(&tn->rb)->rb_left = repl_rb;
			else if (rb_parent(&tn->rb)->rb_right == &tn->rb)
				rb_parent(&tn->rb)->rb_right = repl_rb;
			else BUG();
		} else if (tn->rb.rb_right)
			rb_set_parent(tn->rb.rb_right, NULL);

		jffs2_free_tmp_dnode_info(tn);
		if (ret) {
			dbg_readinode("delete dnode %u-%u.\n",
				fn->ofs, fn->ofs + fn->size);
			jffs2_free_full_dnode(fn);
		}
	}
	jffs2_dbg_fragtree_paranoia_check_nolock(f);

	BUG_ON(first_fn && ref_obsolete(first_fn->raw));

	/* fn now points at the node we will read the latest raw inode from. */
	fn = first_fn;
	if (unlikely(!first_fn)) {
		/* No data nodes for this inode. */
		if (f->inocache->ino != 1) {
			JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
			if (!fd_list) {
				if (f->inocache->state == INO_STATE_READING)
					jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
				return -EIO;
			}
			JFFS2_NOTICE("but it has children so we fake some modes for it\n");
		}
		/* Fabricate a plausible directory inode so the children stay
		 * reachable (ino 1 is the root and is always faked if empty). */
		latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
		latest_node->version = cpu_to_je32(0);
		latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
		latest_node->isize = cpu_to_je32(0);
		latest_node->gid = cpu_to_je16(0);
		latest_node->uid = cpu_to_je16(0);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
		return 0;
	}

	ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
	if (ret || retlen != sizeof(*latest_node)) {
		JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
			ret, retlen, sizeof(*latest_node));
		/* FIXME: If this fails, there seems to be a memory leak. Find it. */
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return ret?ret:-EIO;
	}

	/* node_crc covers everything except the trailing two 32-bit CRCs. */
	crc = crc32(0, latest_node, sizeof(*latest_node)-8);
	if (crc != je32_to_cpu(latest_node->node_crc)) {
		JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
			f->inocache->ino, ref_offset(fn->raw));
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return -EIO;
	}

	switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
	case S_IFDIR:
		if (mctime_ver > je32_to_cpu(latest_node->version)) {
			/* The times in the latest_node are actually older than
			   mctime in the latest dirent. Cheat. */
			latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
		}
		break;


	case S_IFREG:
		/* If it was a regular file, truncate it to the latest node's isize */
		jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
		break;

	case S_IFLNK:
		/* Hack to work around broken isize in old symlink code.
		   Remove this when dwmw2 comes to his senses and stops
		   symlinks from being an entirely gratuitous special
		   case. */
		if (!je32_to_cpu(latest_node->isize))
			latest_node->isize = latest_node->dsize;

		if (f->inocache->state != INO_STATE_CHECKING) {
			/* Symlink's inode data is the target path. Read it and
			 * keep in RAM to facilitate quick follow symlink
			 * operation. */
			f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
			if (!f->target) {
				JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ENOMEM;
			}

			ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node),
						je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);

			if (ret || retlen != je32_to_cpu(latest_node->csize)) {
				if (retlen != je32_to_cpu(latest_node->csize))
					ret = -EIO;
				kfree(f->target);
				f->target = NULL;
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				/* NOTE(review): jffs2_flash_read appears to
				 * return a negative errno (cf. `return
				 * ret?ret:-EIO` above), so `-ret` here would
				 * yield a POSITIVE value - looks like a sign
				 * bug; confirm the return convention before
				 * changing. */
				return -ret;
			}

			f->target[je32_to_cpu(latest_node->csize)] = '\0';
			dbg_readinode("symlink's target '%s' cached\n", f->target);
		}

		/* fall through... */

	case S_IFBLK:
	case S_IFCHR:
		/* Certain inode types should have only one data node, and it's
		   kept as the metadata node */
		if (f->metadata) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
				f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		if (!frag_first(&f->fragtree)) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
				f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* ASSERT: f->fraglist != NULL */
		if (frag_next(frag_first(&f->fragtree))) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
				f->inocache->ino, jemode_to_cpu(latest_node->mode));
			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* OK. We're happy */
		f->metadata = frag_first(&f->fragtree)->node;
		jffs2_free_node_frag(frag_first(&f->fragtree));
		f->fragtree = RB_ROOT;
		break;
	}
	if (f->inocache->state == INO_STATE_READING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);

	return 0;
}
860
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100861/* Scan the list of all nodes present for this ino, build map of versions, etc. */
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000862int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100863 uint32_t ino, struct jffs2_raw_inode *latest_node)
864{
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100865 dbg_readinode("read inode #%u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100866
867 retry_inocache:
868 spin_lock(&c->inocache_lock);
869 f->inocache = jffs2_get_ino_cache(c, ino);
870
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100871 if (f->inocache) {
872 /* Check its state. We may need to wait before we can use it */
873 switch(f->inocache->state) {
874 case INO_STATE_UNCHECKED:
875 case INO_STATE_CHECKEDABSENT:
876 f->inocache->state = INO_STATE_READING;
877 break;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000878
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100879 case INO_STATE_CHECKING:
880 case INO_STATE_GC:
881 /* If it's in either of these states, we need
882 to wait for whoever's got it to finish and
883 put it back. */
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100884 dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100885 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
886 goto retry_inocache;
887
888 case INO_STATE_READING:
889 case INO_STATE_PRESENT:
890 /* Eep. This should never happen. It can
891 happen if Linux calls read_inode() again
892 before clear_inode() has finished though. */
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100893 JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100894 /* Fail. That's probably better than allowing it to succeed */
895 f->inocache = NULL;
896 break;
897
898 default:
899 BUG();
900 }
901 }
902 spin_unlock(&c->inocache_lock);
903
904 if (!f->inocache && ino == 1) {
905 /* Special case - no root inode on medium */
906 f->inocache = jffs2_alloc_inode_cache();
907 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100908 JFFS2_ERROR("cannot allocate inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100909 return -ENOMEM;
910 }
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100911 dbg_readinode("creating inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100912 memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
913 f->inocache->ino = f->inocache->nlink = 1;
914 f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
915 f->inocache->state = INO_STATE_READING;
916 jffs2_add_ino_cache(c, f->inocache);
917 }
918 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100919 JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100920 return -ENOENT;
921 }
922
923 return jffs2_do_read_inode_internal(c, f, latest_node);
924}
925
926int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
927{
928 struct jffs2_raw_inode n;
Yan Burman3d375d92006-12-04 15:03:01 -0800929 struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100930 int ret;
931
932 if (!f)
933 return -ENOMEM;
934
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100935 init_MUTEX_LOCKED(&f->sem);
936 f->inocache = ic;
937
938 ret = jffs2_do_read_inode_internal(c, f, &n);
939 if (!ret) {
940 up(&f->sem);
941 jffs2_do_clear_inode(c, f);
942 }
943 kfree (f);
944 return ret;
945}
946
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
948{
949 struct jffs2_full_dirent *fd, *fds;
950 int deleted;
951
KaiGai Koheic7afb0f2006-07-02 15:13:46 +0100952 jffs2_clear_acl(f);
KaiGai Kohei355ed4e2006-06-24 09:15:36 +0900953 jffs2_xattr_delete_inode(c, f->inocache);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954 down(&f->sem);
955 deleted = f->inocache && !f->inocache->nlink;
956
David Woodhouse67e345d2005-02-27 23:01:36 +0000957 if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
958 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);
959
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960 if (f->metadata) {
961 if (deleted)
962 jffs2_mark_node_obsolete(c, f->metadata->raw);
963 jffs2_free_full_dnode(f->metadata);
964 }
965
966 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
967
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100968 if (f->target) {
969 kfree(f->target);
970 f->target = NULL;
971 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000972
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100973 fds = f->dents;
974 while(fds) {
975 fd = fds;
976 fds = fd->next;
977 jffs2_free_full_dirent(fd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978 }
979
David Woodhouse67e345d2005-02-27 23:01:36 +0000980 if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
David Woodhouse67e345d2005-02-27 23:01:36 +0000982 if (f->inocache->nodes == (void *)f->inocache)
983 jffs2_del_ino_cache(c, f->inocache);
984 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985
986 up(&f->sem);
987}