blob: 5351b34d5419785c1b9f746c441bc394a1b5957a [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2001-2003 Red Hat, Inc.
5 *
6 * Created by David Woodhouse <dwmw2@infradead.org>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
Thomas Gleixner182ec4e2005-11-07 11:16:07 +000010 * $Id: readinode.c,v 1.143 2005/11/07 11:14:41 gleixner Exp $
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 *
12 */
13
14#include <linux/kernel.h>
Andrew Lunn737b7662005-07-30 16:29:30 +010015#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/slab.h>
17#include <linux/fs.h>
18#include <linux/crc32.h>
19#include <linux/pagemap.h>
20#include <linux/mtd/mtd.h>
21#include <linux/compiler.h>
22#include "nodelist.h"
23
/*
 * Put a new tmp_dnode_info into the temporary RB-tree, keeping the list in
 * order of increasing version.
 */
28static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -070029{
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010030 struct rb_node **p = &list->rb_node;
31 struct rb_node * parent = NULL;
32 struct jffs2_tmp_dnode_info *this;
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010034 while (*p) {
35 parent = *p;
36 this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010038 /* There may actually be a collision here, but it doesn't
39 actually matter. As long as the two nodes with the same
40 version are together, it's all fine. */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010041 if (tn->version > this->version)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010042 p = &(*p)->rb_left;
43 else
44 p = &(*p)->rb_right;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010045 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010047 rb_link_node(&tn->rb, parent, p);
48 rb_insert_color(&tn->rb, list);
49}
50
51static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
52{
53 struct rb_node *this;
54 struct jffs2_tmp_dnode_info *tn;
55
56 this = list->rb_node;
57
58 /* Now at bottom of tree */
59 while (this) {
60 if (this->rb_left)
61 this = this->rb_left;
62 else if (this->rb_right)
63 this = this->rb_right;
64 else {
65 tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
66 jffs2_free_full_dnode(tn->fn);
67 jffs2_free_tmp_dnode_info(tn);
68
69 this = this->rb_parent;
70 if (!this)
71 break;
72
73 if (this->rb_left == &tn->rb)
74 this->rb_left = NULL;
75 else if (this->rb_right == &tn->rb)
76 this->rb_right = NULL;
77 else BUG();
78 }
79 }
80 list->rb_node = NULL;
81}
82
83static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
84{
85 struct jffs2_full_dirent *next;
86
87 while (fd) {
88 next = fd->next;
89 jffs2_free_full_dirent(fd);
90 fd = next;
91 }
92}
93
94/* Returns first valid node after 'ref'. May return 'ref' */
95static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
96{
97 while (ref && ref->next_in_ino) {
98 if (!ref_obsolete(ref))
99 return ref;
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100100 dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100101 ref = ref->next_in_ino;
102 }
103 return NULL;
104}
105
/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
 *
 * Returns: 0 on success;
 * 	    1 if the node should be marked obsolete;
 * 	    negative error code on failure.
 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100114static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
Atsushi Nemoto0ef675d2006-03-09 17:33:38 -0800115 struct jffs2_raw_dirent *rd, size_t read, struct jffs2_full_dirent **fdp,
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100116 uint32_t *latest_mctime, uint32_t *mctime_ver)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100117{
118 struct jffs2_full_dirent *fd;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000119
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100120 /* The direntry nodes are checked during the flash scanning */
121 BUG_ON(ref_flags(ref) == REF_UNCHECKED);
122 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
123 BUG_ON(ref_obsolete(ref));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000124
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100125 /* Sanity check */
126 if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100127 JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100128 ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
129 return 1;
130 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000131
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100132 fd = jffs2_alloc_full_dirent(rd->nsize + 1);
133 if (unlikely(!fd))
134 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100136 fd->raw = ref;
137 fd->version = je32_to_cpu(rd->version);
138 fd->ino = je32_to_cpu(rd->ino);
139 fd->type = rd->type;
140
141 /* Pick out the mctime of the latest dirent */
Artem B. Bityutskiy3a69e0c2005-08-17 14:46:26 +0100142 if(fd->version > *mctime_ver && je32_to_cpu(rd->mctime)) {
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100143 *mctime_ver = fd->version;
144 *latest_mctime = je32_to_cpu(rd->mctime);
145 }
146
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000147 /*
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100148 * Copy as much of the name as possible from the raw
149 * dirent we've already read from the flash.
150 */
151 if (read > sizeof(*rd))
152 memcpy(&fd->name[0], &rd->name[0],
153 min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000154
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100155 /* Do we need to copy any more of the name directly from the flash? */
156 if (rd->nsize + sizeof(*rd) > read) {
157 /* FIXME: point() */
158 int err;
159 int already = read - sizeof(*rd);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000160
161 err = jffs2_flash_read(c, (ref_offset(ref)) + read,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100162 rd->nsize - already, &read, &fd->name[already]);
163 if (unlikely(read != rd->nsize - already) && likely(!err))
164 return -EIO;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000165
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100166 if (unlikely(err)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100167 JFFS2_ERROR("read remainder of name: error %d\n", err);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100168 jffs2_free_full_dirent(fd);
169 return -EIO;
170 }
171 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000172
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100173 fd->nhash = full_name_hash(fd->name, rd->nsize);
174 fd->next = NULL;
175 fd->name[rd->nsize] = '\0';
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000176
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100177 /*
178 * Wheee. We now have a complete jffs2_full_dirent structure, with
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000179 * the name in it and everything. Link it into the list
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100180 */
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100181 jffs2_add_fd_to_list(c, fd, fdp);
182
183 return 0;
184}
185
/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode node is found.
 *
 * Returns: 0 on success;
 * 	    1 if the node should be marked obsolete;
 * 	    negative error code on failure.
 */
static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
			     struct jffs2_raw_inode *rd, struct rb_root *tnp, int rdlen,
			     uint32_t *latest_mctime, uint32_t *mctime_ver)
{
	struct jffs2_tmp_dnode_info *tn;
	uint32_t len, csize;
	int ret = 1;	/* default: "mark node obsolete" if we bail via free_out */

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	tn = jffs2_alloc_tmp_dnode_info();
	if (!tn) {
		JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
		return -ENOMEM;
	}

	tn->partial_crc = 0;
	csize = je32_to_cpu(rd->csize);

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {
		uint32_t crc;

		/* Header CRC covers everything up to (but not including) the
		 * two trailing 32-bit CRC fields, hence the "- 8". */
		crc = crc32(0, rd, sizeof(*rd) - 8);
		if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
			JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
				     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
			goto free_out;
		}

		/* Sanity checks */
		if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
		    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			goto free_out;
		}

		if (jffs2_is_writebuffered(c) && csize != 0) {
			/* At this point we are supposed to check the data CRC
			 * of our unchecked node. But thus far, we do not
			 * know whether the node is valid or obsolete. To
			 * figure this out, we need to walk all the nodes of
			 * the inode and build the inode fragtree. We don't
			 * want to spend time checking data of nodes which may
			 * later be found to be obsolete. So we put off the full
			 * data CRC checking until we have read all the inode
			 * nodes and have started building the fragtree.
			 *
			 * The fragtree is being built starting with nodes
			 * having the highest version number, so we'll be able
			 * to detect whether a node is valid (i.e., it is not
			 * overlapped by a node with higher version) or not.
			 * And we'll be able to check only those nodes, which
			 * are not obsolete.
			 *
			 * Of course, this optimization only makes sense in case
			 * of NAND flashes (or other flashes with
			 * !jffs2_can_mark_obsolete()), since on NOR flashes
			 * nodes are marked obsolete physically.
			 *
			 * Since NAND flashes (or other flashes with
			 * jffs2_is_writebuffered(c)) are anyway read by
			 * fractions of c->wbuf_pagesize, and we have just read
			 * the node header, it is likely that the starting part
			 * of the node data is also read when we read the
			 * header. So we don't mind to check the CRC of the
			 * starting part of the data of the node now, and check
			 * the second part later (in jffs2_check_node_data()).
			 * Of course, we will not need to re-read and re-check
			 * the NAND page which we have just read. This is why we
			 * read the whole NAND page at jffs2_get_inode_nodes(),
			 * while we needed only the node header.
			 */
			unsigned char *buf;

			/* 'buf' will point to the start of data */
			buf = (unsigned char *)rd + sizeof(*rd);
			/* len will be the read data length */
			len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
			tn->partial_crc = crc32(0, buf, len);

			dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);

			/* If we actually calculated the whole data CRC
			 * and it is wrong, drop the node. */
			if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
				JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
					     ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
				goto free_out;
			}

		} else if (csize == 0) {
			/*
			 * We checked the header CRC. If the node has no data, adjust
			 * the space accounting now. For other nodes this will be done
			 * later either when the node is marked obsolete or when its
			 * data is checked.
			 */
			struct jffs2_eraseblock *jeb;

			dbg_readinode("the node has no data.\n");
			jeb = &c->blocks[ref->flash_offset / c->sector_size];
			len = ref_totlen(c, jeb, ref);

			/* Move this node's space from "unchecked" to "used"
			 * under the lock that guards the accounting fields. */
			spin_lock(&c->erase_completion_lock);
			jeb->used_size += len;
			jeb->unchecked_size -= len;
			c->used_size += len;
			c->unchecked_size -= len;
			ref->flash_offset = ref_offset(ref) | REF_NORMAL;
			spin_unlock(&c->erase_completion_lock);
		}
	}

	tn->fn = jffs2_alloc_full_dnode();
	if (!tn->fn) {
		JFFS2_ERROR("alloc fn failed\n");
		ret = -ENOMEM;
		goto free_out;
	}

	tn->version = je32_to_cpu(rd->version);
	tn->fn->ofs = je32_to_cpu(rd->offset);
	tn->data_crc = je32_to_cpu(rd->data_crc);
	tn->csize = csize;
	tn->fn->raw = ref;

	/* There was a bug where we wrote hole nodes out with
	   csize/dsize swapped. Deal with it */
	if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
		tn->fn->size = csize;
	else // normal case...
		tn->fn->size = je32_to_cpu(rd->dsize);

	dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
		      ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);

	/* Hand ownership of 'tn' (and tn->fn) to the temporary tree. */
	jffs2_add_tn_to_tree(tn, tnp);

	return 0;

free_out:
	jffs2_free_tmp_dnode_info(tn);
	return ret;
}
341
342/*
343 * Helper function for jffs2_get_inode_nodes().
344 * It is called every time an unknown node is found.
345 *
David Woodhouse3877f0b2006-06-18 00:05:26 +0100346 * Returns: 0 on success;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100347 * 1 if the node should be marked obsolete;
348 * negative error code on failure.
349 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100350static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100351{
352 /* We don't mark unknown nodes as REF_UNCHECKED */
353 BUG_ON(ref_flags(ref) == REF_UNCHECKED);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000354
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100355 un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
356
David Woodhouse3877f0b2006-06-18 00:05:26 +0100357 switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
358
359 case JFFS2_FEATURE_INCOMPAT:
360 JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
361 je16_to_cpu(un->nodetype), ref_offset(ref));
362 /* EEP */
363 BUG();
364 break;
365
366 case JFFS2_FEATURE_ROCOMPAT:
367 JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
368 je16_to_cpu(un->nodetype), ref_offset(ref));
369 BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
370 break;
371
372 case JFFS2_FEATURE_RWCOMPAT_COPY:
373 JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
374 je16_to_cpu(un->nodetype), ref_offset(ref));
375 break;
376
377 case JFFS2_FEATURE_RWCOMPAT_DELETE:
378 JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
379 je16_to_cpu(un->nodetype), ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100380 return 1;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100381 }
382
383 return 0;
384}
385
/*
 * Helper function for jffs2_get_inode_nodes().
 * The function detects whether more data should be read and reads it if yes.
 *
 * Returns: 0 on success;
 * 	    negative error code on failure.
 */
393static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
394 int right_size, int *rdlen, unsigned char *buf, unsigned char *bufstart)
395{
396 int right_len, err, len;
397 size_t retlen;
398 uint32_t offs;
399
400 if (jffs2_is_writebuffered(c)) {
401 right_len = c->wbuf_pagesize - (bufstart - buf);
402 if (right_size + (int)(bufstart - buf) > c->wbuf_pagesize)
403 right_len += c->wbuf_pagesize;
404 } else
405 right_len = right_size;
406
407 if (*rdlen == right_len)
408 return 0;
409
410 /* We need to read more data */
411 offs = ref_offset(ref) + *rdlen;
412 if (jffs2_is_writebuffered(c)) {
413 bufstart = buf + c->wbuf_pagesize;
414 len = c->wbuf_pagesize;
415 } else {
416 bufstart = buf + *rdlen;
417 len = right_size - *rdlen;
418 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000419
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100420 dbg_readinode("read more %d bytes\n", len);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100421
422 err = jffs2_flash_read(c, offs, len, &retlen, bufstart);
423 if (err) {
424 JFFS2_ERROR("can not read %d bytes from 0x%08x, "
425 "error code: %d.\n", len, offs, err);
426 return err;
427 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000428
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100429 if (retlen < len) {
Randy Dunlapfb6a82c2006-04-11 20:12:10 -0400430 JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100431 offs, retlen, len);
432 return -EIO;
433 }
434
435 *rdlen = right_len;
436
437 return 0;
438}
439
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100440/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
441 with this ino, returning the former in order of version */
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct rb_root *tnp, struct jffs2_full_dirent **fdp,
				 uint32_t *highest_version, uint32_t *latest_mctime,
				 uint32_t *mctime_ver)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	struct rb_root ret_tn = RB_ROOT;
	struct jffs2_full_dirent *ret_fd = NULL;
	unsigned char *buf = NULL;
	union jffs2_node_union *node;
	size_t retlen;
	int len, err;

	*mctime_ver = 0;

	dbg_readinode("ino #%u\n", f->inocache->ino);

	if (jffs2_is_writebuffered(c)) {
		/*
		 * If we have the write buffer, we assume the minimal I/O unit
		 * is c->wbuf_pagesize. We implement some optimizations which in
		 * this case and we need a temporary buffer of size =
		 * 2*c->wbuf_pagesize bytes (see comments in read_dnode()).
		 * Basically, we want to read not only the node header, but the
		 * whole wbuf (NAND page in case of NAND) or 2, if the node
		 * header overlaps the border between the 2 wbufs.
		 */
		len = 2*c->wbuf_pagesize;
	} else {
		/*
		 * When there is no write buffer, the size of the temporary
		 * buffer is the size of the largest node header.
		 */
		len = sizeof(union jffs2_node_union);
	}

	/* FIXME: in case of NOR and available ->point() this
	 * needs to be fixed. */
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(&c->erase_completion_lock);
	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
	/* ino #1 (the root) may legitimately have no nodes yet. */
	if (!valid_ref && f->inocache->ino != 1)
		JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
	while (valid_ref) {
		unsigned char *bufstart;

		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		 */
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		/* Flash I/O below must not run under the spinlock; it is
		 * re-taken at 'cont:' before the next iteration. */
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/*
		 * At this point we don't know the type of the node we're going
		 * to read, so we do not know the size of its header. In order
		 * to minimize the amount of flash IO we assume the node has
		 * size = JFFS2_MIN_NODE_HEADER.
		 */
		if (jffs2_is_writebuffered(c)) {
			/*
			 * We treat 'buf' as 2 adjacent wbufs. We want to
			 * adjust bufstart such as it points to the
			 * beginning of the node within this wbuf.
			 */
			bufstart = buf + (ref_offset(ref) % c->wbuf_pagesize);
			/* We will read either one wbuf or 2 wbufs. */
			len = c->wbuf_pagesize - (bufstart - buf);
			if (JFFS2_MIN_NODE_HEADER + (int)(bufstart - buf) > c->wbuf_pagesize) {
				/* The header spans the border of the first wbuf */
				len += c->wbuf_pagesize;
			}
		} else {
			bufstart = buf;
			len = JFFS2_MIN_NODE_HEADER;
		}

		dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len,
				       &retlen, bufstart);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)bufstart;

		/* No need to mask in the valid bit; it shouldn't be invalid */
		if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
			JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
				     ref_offset(ref), je16_to_cpu(node->u.magic),
				     je16_to_cpu(node->u.nodetype),
				     je32_to_cpu(node->u.totlen),
				     je32_to_cpu(node->u.hdr_crc));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}

		/* Dispatch on node type. For each type: make sure the full
		 * header is in the buffer (read_more), parse it, and track
		 * the highest version seen. A helper return of 1 means
		 * "obsolete this node and continue". */
		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->d.version) > *highest_version)
				*highest_version = je32_to_cpu(node->d.version);

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->i.version) > *highest_version)
				*highest_version = je32_to_cpu(node->i.version);

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

		}
	cont:
		/* Re-take the lock before walking to the next ref. */
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	/* Success: hand the collected dnode tree and dirent list to caller. */
	*tnp = ret_tn;
	*fdp = ret_fd;
	kfree(buf);

	dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
		      f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver);
	return 0;

 free_out:
	/* Failure: release everything collected so far. */
	jffs2_free_tmp_dnode_info_list(&ret_tn);
	jffs2_free_full_dirent_list(ret_fd);
	kfree(buf);
	return err;
}
633
/*
 * Build the in-core representation of an inode from its raw flash nodes.
 *
 * Collects all nodes for the inode via jffs2_get_inode_nodes(), merges
 * each data node into the fragment tree, reads and CRC-checks the most
 * recent inode node into *latest_node, then applies per-file-type
 * fix-ups (directory mtime from dirents, regular-file truncation to
 * isize, symlink target caching, special-inode metadata collapse).
 *
 * Called with f->sem held (see jffs2_do_crccheck_inode(), which takes it
 * with init_MUTEX_LOCKED before calling here); the error paths below
 * release f->sem themselves before tearing the inode down.
 *
 * Returns 0 on success or a negative errno.
 */
static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
					struct jffs2_inode_info *f,
					struct jffs2_raw_inode *latest_node)
{
	struct jffs2_tmp_dnode_info *tn;
	struct rb_root tn_list;
	struct rb_node *rb, *repl_rb;
	struct jffs2_full_dirent *fd_list;
	struct jffs2_full_dnode *fn, *first_fn = NULL;
	uint32_t crc;
	uint32_t latest_mctime, mctime_ver;
	size_t retlen;
	int ret;

	dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);

	/* Grab all nodes relevant to this ino */
	ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);

	if (ret) {
		JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		return ret;
	}
	f->dents = fd_list;

	/* The tn tree is keyed so that higher versions sit to the left
	 * (see jffs2_add_tn_to_tree), so rb_first() yields the newest
	 * node and we walk towards progressively older ones. */
	rb = rb_first(&tn_list);

	while (rb) {
		cond_resched();
		tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb);
		fn = tn->fn;
		/* Default: the full_dnode is spent and will be freed at the
		 * bottom of the loop, unless one of the branches below
		 * decides it carries live data and clears ret. */
		ret = 1;
		dbg_readinode("consider node ver %u, phys offset "
			"%#08x(%d), range %u-%u.\n", tn->version,
			ref_offset(fn->raw), ref_flags(fn->raw),
			fn->ofs, fn->ofs + fn->size);

		if (fn->size) {
			/* Data node: merge it (an older fragment) into the
			 * fragment tree. ret == 0 means some of its data is
			 * still visible, so the dnode must be kept. */
			ret = jffs2_add_older_frag_to_fragtree(c, f, tn);
			/* TODO: the error code isn't checked, check it */
			jffs2_dbg_fragtree_paranoia_check_nolock(f);
			BUG_ON(ret < 0);
			/* Remember the newest live node; used below to read
			 * the latest raw inode from flash. */
			if (!first_fn && ret == 0)
				first_fn = fn;
		} else if (!first_fn) {
			/* Zero-size node seen first (i.e. newest): it is a
			 * pure metadata update node. */
			first_fn = fn;
			f->metadata = fn;
			ret = 0; /* Prevent freeing the metadata update node */
		} else
			/* Zero-size node superseded by a newer one: obsolete
			 * it on flash. */
			jffs2_mark_node_obsolete(c, fn->raw);

		/* Manual excision of the spent tn from the rb-tree. Since
		 * the whole tree is consumed left-to-right and then thrown
		 * away, there is no point paying for rb_erase()'s
		 * rebalancing. The current node is always the leftmost, so
		 * it has no left child. */
		BUG_ON(rb->rb_left);
		if (rb->rb_parent && rb->rb_parent->rb_left == rb) {
			/* We were then left-hand child of our parent. We need
			 * to move our own right-hand child into our place. */
			repl_rb = rb->rb_right;
			if (repl_rb)
				repl_rb->rb_parent = rb->rb_parent;
		} else
			repl_rb = NULL;

		/* Advance before unlinking, while the links are intact. */
		rb = rb_next(rb);

		/* Remove the spent tn from the tree; don't bother rebalancing
		 * but put our right-hand child in our own place. */
		if (tn->rb.rb_parent) {
			if (tn->rb.rb_parent->rb_left == &tn->rb)
				tn->rb.rb_parent->rb_left = repl_rb;
			else if (tn->rb.rb_parent->rb_right == &tn->rb)
				tn->rb.rb_parent->rb_right = repl_rb;
			else BUG();
		} else if (tn->rb.rb_right)
			tn->rb.rb_right->rb_parent = NULL;

		jffs2_free_tmp_dnode_info(tn);
		if (ret) {
			/* The dnode held no live data; free it. */
			dbg_readinode("delete dnode %u-%u.\n",
				fn->ofs, fn->ofs + fn->size);
			jffs2_free_full_dnode(fn);
		}
	}
	jffs2_dbg_fragtree_paranoia_check_nolock(f);

	BUG_ON(first_fn && ref_obsolete(first_fn->raw));

	/* fn now points at the newest node with live data (or metadata). */
	fn = first_fn;
	if (unlikely(!first_fn)) {
		/* No data nodes for this inode. */
		if (f->inocache->ino != 1) {
			JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
			if (!fd_list) {
				if (f->inocache->state == INO_STATE_READING)
					jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
				return -EIO;
			}
			JFFS2_NOTICE("but it has children so we fake some modes for it\n");
		}
		/* Fabricate a plausible directory inode so the children
		 * remain reachable (always done for the root, ino 1). */
		latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
		latest_node->version = cpu_to_je32(0);
		latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
		latest_node->isize = cpu_to_je32(0);
		latest_node->gid = cpu_to_je16(0);
		latest_node->uid = cpu_to_je16(0);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
		return 0;
	}

	/* Re-read the newest raw inode node from flash to get the full
	 * latest metadata (mode, times, sizes, ...). */
	ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
	if (ret || retlen != sizeof(*latest_node)) {
		JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
			ret, retlen, sizeof(*latest_node));
		/* FIXME: If this fails, there seems to be a memory leak. Find it. */
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return ret?ret:-EIO;
	}

	/* node_crc covers the whole raw inode except the trailing 8 bytes
	 * (data_crc and node_crc themselves). */
	crc = crc32(0, latest_node, sizeof(*latest_node)-8);
	if (crc != je32_to_cpu(latest_node->node_crc)) {
		JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
			f->inocache->ino, ref_offset(fn->raw));
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return -EIO;
	}

	switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
	case S_IFDIR:
		if (mctime_ver > je32_to_cpu(latest_node->version)) {
			/* The times in the latest_node are actually older than
			   mctime in the latest dirent. Cheat. */
			latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
		}
		break;


	case S_IFREG:
		/* If it was a regular file, truncate it to the latest node's isize */
		jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
		break;

	case S_IFLNK:
		/* Hack to work around broken isize in old symlink code.
		   Remove this when dwmw2 comes to his senses and stops
		   symlinks from being an entirely gratuitous special
		   case. */
		if (!je32_to_cpu(latest_node->isize))
			latest_node->isize = latest_node->dsize;

		if (f->inocache->state != INO_STATE_CHECKING) {
			/* Symlink's inode data is the target path. Read it and
			 * keep in RAM to facilitate quick follow symlink
			 * operation. */
			f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
			if (!f->target) {
				JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ENOMEM;
			}

			ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node),
						je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);

			if (ret || retlen != je32_to_cpu(latest_node->csize)) {
				if (retlen != je32_to_cpu(latest_node->csize))
					ret = -EIO;
				kfree(f->target);
				f->target = NULL;
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				/* NOTE(review): if jffs2_flash_read() already
				 * returned a negative errno, negating it here
				 * yields a positive value — suspect this should
				 * be 'return ret;'. Confirm the sign convention
				 * of jffs2_flash_read(). */
				return -ret;
			}

			f->target[je32_to_cpu(latest_node->csize)] = '\0';
			dbg_readinode("symlink's target '%s' cached\n", f->target);
		}

		/* fall through... */

	case S_IFBLK:
	case S_IFCHR:
		/* Certain inode types should have only one data node, and it's
		   kept as the metadata node */
		if (f->metadata) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
				f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		if (!frag_first(&f->fragtree)) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
				f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* ASSERT: f->fraglist != NULL */
		if (frag_next(frag_first(&f->fragtree))) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
				f->inocache->ino, jemode_to_cpu(latest_node->mode));
			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* OK. We're happy */
		/* Collapse the single fragment into the metadata node; the
		 * device number lives in the node data, not the fragtree. */
		f->metadata = frag_first(&f->fragtree)->node;
		jffs2_free_node_frag(frag_first(&f->fragtree));
		f->fragtree = RB_ROOT;
		break;
	}
	if (f->inocache->state == INO_STATE_READING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);

	return 0;
}
855
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100856/* Scan the list of all nodes present for this ino, build map of versions, etc. */
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000857int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100858 uint32_t ino, struct jffs2_raw_inode *latest_node)
859{
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100860 dbg_readinode("read inode #%u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100861
862 retry_inocache:
863 spin_lock(&c->inocache_lock);
864 f->inocache = jffs2_get_ino_cache(c, ino);
865
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100866 if (f->inocache) {
867 /* Check its state. We may need to wait before we can use it */
868 switch(f->inocache->state) {
869 case INO_STATE_UNCHECKED:
870 case INO_STATE_CHECKEDABSENT:
871 f->inocache->state = INO_STATE_READING;
872 break;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000873
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100874 case INO_STATE_CHECKING:
875 case INO_STATE_GC:
876 /* If it's in either of these states, we need
877 to wait for whoever's got it to finish and
878 put it back. */
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100879 dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100880 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
881 goto retry_inocache;
882
883 case INO_STATE_READING:
884 case INO_STATE_PRESENT:
885 /* Eep. This should never happen. It can
886 happen if Linux calls read_inode() again
887 before clear_inode() has finished though. */
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100888 JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100889 /* Fail. That's probably better than allowing it to succeed */
890 f->inocache = NULL;
891 break;
892
893 default:
894 BUG();
895 }
896 }
897 spin_unlock(&c->inocache_lock);
898
899 if (!f->inocache && ino == 1) {
900 /* Special case - no root inode on medium */
901 f->inocache = jffs2_alloc_inode_cache();
902 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100903 JFFS2_ERROR("cannot allocate inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100904 return -ENOMEM;
905 }
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100906 dbg_readinode("creating inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100907 memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
908 f->inocache->ino = f->inocache->nlink = 1;
909 f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
910 f->inocache->state = INO_STATE_READING;
911 jffs2_add_ino_cache(c, f->inocache);
912 }
913 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100914 JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100915 return -ENOENT;
916 }
917
918 return jffs2_do_read_inode_internal(c, f, latest_node);
919}
920
921int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
922{
923 struct jffs2_raw_inode n;
924 struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
925 int ret;
926
927 if (!f)
928 return -ENOMEM;
929
930 memset(f, 0, sizeof(*f));
931 init_MUTEX_LOCKED(&f->sem);
932 f->inocache = ic;
933
934 ret = jffs2_do_read_inode_internal(c, f, &n);
935 if (!ret) {
936 up(&f->sem);
937 jffs2_do_clear_inode(c, f);
938 }
939 kfree (f);
940 return ret;
941}
942
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
944{
945 struct jffs2_full_dirent *fd, *fds;
946 int deleted;
947
948 down(&f->sem);
949 deleted = f->inocache && !f->inocache->nlink;
950
David Woodhouse67e345d2005-02-27 23:01:36 +0000951 if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
952 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);
953
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954 if (f->metadata) {
955 if (deleted)
956 jffs2_mark_node_obsolete(c, f->metadata->raw);
957 jffs2_free_full_dnode(f->metadata);
958 }
959
960 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
961
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100962 if (f->target) {
963 kfree(f->target);
964 f->target = NULL;
965 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000966
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100967 fds = f->dents;
968 while(fds) {
969 fd = fds;
970 fds = fd->next;
971 jffs2_free_full_dirent(fd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972 }
973
David Woodhouse67e345d2005-02-27 23:01:36 +0000974 if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
David Woodhouse67e345d2005-02-27 23:01:36 +0000976 if (f->inocache->nodes == (void *)f->inocache)
977 jffs2_del_ino_cache(c, f->inocache);
978 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979
980 up(&f->sem);
981}