blob: 5f0652df5d47dab051839cf88218e2c353c28733 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2001-2003 Red Hat, Inc.
5 *
6 * Created by David Woodhouse <dwmw2@infradead.org>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
Thomas Gleixner182ec4e2005-11-07 11:16:07 +000010 * $Id: readinode.c,v 1.143 2005/11/07 11:14:41 gleixner Exp $
Linus Torvalds1da177e2005-04-16 15:20:36 -070011 *
12 */
13
14#include <linux/kernel.h>
Andrew Lunn737b7662005-07-30 16:29:30 +010015#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/slab.h>
17#include <linux/fs.h>
18#include <linux/crc32.h>
19#include <linux/pagemap.h>
20#include <linux/mtd/mtd.h>
21#include <linux/compiler.h>
22#include "nodelist.h"
23
/*
 * Put a new tmp_dnode_info into the temporary RB-tree, keeping the list in
 * order of increasing version.
 */
28static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
Linus Torvalds1da177e2005-04-16 15:20:36 -070029{
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010030 struct rb_node **p = &list->rb_node;
31 struct rb_node * parent = NULL;
32 struct jffs2_tmp_dnode_info *this;
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010034 while (*p) {
35 parent = *p;
36 this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070037
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010038 /* There may actually be a collision here, but it doesn't
39 actually matter. As long as the two nodes with the same
40 version are together, it's all fine. */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010041 if (tn->version > this->version)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010042 p = &(*p)->rb_left;
43 else
44 p = &(*p)->rb_right;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +010045 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +010047 rb_link_node(&tn->rb, parent, p);
48 rb_insert_color(&tn->rb, list);
49}
50
51static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
52{
53 struct rb_node *this;
54 struct jffs2_tmp_dnode_info *tn;
55
56 this = list->rb_node;
57
58 /* Now at bottom of tree */
59 while (this) {
60 if (this->rb_left)
61 this = this->rb_left;
62 else if (this->rb_right)
63 this = this->rb_right;
64 else {
65 tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
66 jffs2_free_full_dnode(tn->fn);
67 jffs2_free_tmp_dnode_info(tn);
68
69 this = this->rb_parent;
70 if (!this)
71 break;
72
73 if (this->rb_left == &tn->rb)
74 this->rb_left = NULL;
75 else if (this->rb_right == &tn->rb)
76 this->rb_right = NULL;
77 else BUG();
78 }
79 }
80 list->rb_node = NULL;
81}
82
83static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
84{
85 struct jffs2_full_dirent *next;
86
87 while (fd) {
88 next = fd->next;
89 jffs2_free_full_dirent(fd);
90 fd = next;
91 }
92}
93
94/* Returns first valid node after 'ref'. May return 'ref' */
95static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
96{
97 while (ref && ref->next_in_ino) {
98 if (!ref_obsolete(ref))
99 return ref;
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100100 dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100101 ref = ref->next_in_ino;
102 }
103 return NULL;
104}
105
/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
 *
 * Returns: 0 on success;
 * 	    1 if the node should be marked obsolete;
 * 	    negative error code on failure.
 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100114static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
115 struct jffs2_raw_dirent *rd, uint32_t read, struct jffs2_full_dirent **fdp,
116 uint32_t *latest_mctime, uint32_t *mctime_ver)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100117{
118 struct jffs2_full_dirent *fd;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000119
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100120 /* The direntry nodes are checked during the flash scanning */
121 BUG_ON(ref_flags(ref) == REF_UNCHECKED);
122 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
123 BUG_ON(ref_obsolete(ref));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000124
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100125 /* Sanity check */
126 if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100127 JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100128 ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
129 return 1;
130 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000131
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100132 fd = jffs2_alloc_full_dirent(rd->nsize + 1);
133 if (unlikely(!fd))
134 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100136 fd->raw = ref;
137 fd->version = je32_to_cpu(rd->version);
138 fd->ino = je32_to_cpu(rd->ino);
139 fd->type = rd->type;
140
141 /* Pick out the mctime of the latest dirent */
Artem B. Bityutskiy3a69e0c2005-08-17 14:46:26 +0100142 if(fd->version > *mctime_ver && je32_to_cpu(rd->mctime)) {
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100143 *mctime_ver = fd->version;
144 *latest_mctime = je32_to_cpu(rd->mctime);
145 }
146
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000147 /*
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100148 * Copy as much of the name as possible from the raw
149 * dirent we've already read from the flash.
150 */
151 if (read > sizeof(*rd))
152 memcpy(&fd->name[0], &rd->name[0],
153 min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000154
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100155 /* Do we need to copy any more of the name directly from the flash? */
156 if (rd->nsize + sizeof(*rd) > read) {
157 /* FIXME: point() */
158 int err;
159 int already = read - sizeof(*rd);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000160
161 err = jffs2_flash_read(c, (ref_offset(ref)) + read,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100162 rd->nsize - already, &read, &fd->name[already]);
163 if (unlikely(read != rd->nsize - already) && likely(!err))
164 return -EIO;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000165
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100166 if (unlikely(err)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100167 JFFS2_ERROR("read remainder of name: error %d\n", err);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100168 jffs2_free_full_dirent(fd);
169 return -EIO;
170 }
171 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000172
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100173 fd->nhash = full_name_hash(fd->name, rd->nsize);
174 fd->next = NULL;
175 fd->name[rd->nsize] = '\0';
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000176
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100177 /*
178 * Wheee. We now have a complete jffs2_full_dirent structure, with
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000179 * the name in it and everything. Link it into the list
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100180 */
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100181 jffs2_add_fd_to_list(c, fd, fdp);
182
183 return 0;
184}
185
/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode node is found.
 *
 * Returns: 0 on success;
 * 	    1 if the node should be marked obsolete;
 * 	    negative error code on failure.
 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100194static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
195 struct jffs2_raw_inode *rd, struct rb_root *tnp, int rdlen,
196 uint32_t *latest_mctime, uint32_t *mctime_ver)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100197{
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100198 struct jffs2_tmp_dnode_info *tn;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100199 uint32_t len, csize;
200 int ret = 1;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000201
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100202 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
203 BUG_ON(ref_obsolete(ref));
204
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100205 tn = jffs2_alloc_tmp_dnode_info();
206 if (!tn) {
207 JFFS2_ERROR("failed to allocate tn (%d bytes).\n", sizeof(*tn));
208 return -ENOMEM;
209 }
210
211 tn->partial_crc = 0;
212 csize = je32_to_cpu(rd->csize);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000213
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100214 /* If we've never checked the CRCs on this node, check them now */
215 if (ref_flags(ref) == REF_UNCHECKED) {
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100216 uint32_t crc;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100217
218 crc = crc32(0, rd, sizeof(*rd) - 8);
219 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100220 JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100221 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100222 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100223 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000224
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100225 /* Sanity checks */
226 if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
227 unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100228 JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
Andrew Lunn737b7662005-07-30 16:29:30 +0100229 jffs2_dbg_dump_node(c, ref_offset(ref));
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100230 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100231 }
232
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100233 if (jffs2_is_writebuffered(c) && csize != 0) {
234 /* At this point we are supposed to check the data CRC
235 * of our unchecked node. But thus far, we do not
236 * know whether the node is valid or obsolete. To
237 * figure this out, we need to walk all the nodes of
238 * the inode and build the inode fragtree. We don't
239 * want to spend time checking data of nodes which may
240 * later be found to be obsolete. So we put off the full
241 * data CRC checking until we have read all the inode
242 * nodes and have started building the fragtree.
243 *
244 * The fragtree is being built starting with nodes
245 * having the highest version number, so we'll be able
246 * to detect whether a node is valid (i.e., it is not
247 * overlapped by a node with higher version) or not.
248 * And we'll be able to check only those nodes, which
249 * are not obsolete.
250 *
251 * Of course, this optimization only makes sense in case
252 * of NAND flashes (or other flashes whith
253 * !jffs2_can_mark_obsolete()), since on NOR flashes
254 * nodes are marked obsolete physically.
255 *
256 * Since NAND flashes (or other flashes with
257 * jffs2_is_writebuffered(c)) are anyway read by
258 * fractions of c->wbuf_pagesize, and we have just read
259 * the node header, it is likely that the starting part
260 * of the node data is also read when we read the
261 * header. So we don't mind to check the CRC of the
262 * starting part of the data of the node now, and check
263 * the second part later (in jffs2_check_node_data()).
264 * Of course, we will not need to re-read and re-check
265 * the NAND page which we have just read. This is why we
266 * read the whole NAND page at jffs2_get_inode_nodes(),
267 * while we needed only the node header.
268 */
269 unsigned char *buf;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100270
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100271 /* 'buf' will point to the start of data */
272 buf = (unsigned char *)rd + sizeof(*rd);
273 /* len will be the read data length */
274 len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
Artem B. Bityutskiy280562b2005-08-17 15:57:43 +0100275 tn->partial_crc = crc32(0, buf, len);
276
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100277 dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100278
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100279 /* If we actually calculated the whole data CRC
280 * and it is wrong, drop the node. */
Artem B. Bityutskiy3c091332005-08-04 12:40:02 +0100281 if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
Artem B. Bityutskiy39243502005-08-03 10:26:50 +0100282 JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
283 ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100284 goto free_out;
Artem B. Bityutskiy39243502005-08-03 10:26:50 +0100285 }
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100286
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100287 } else if (csize == 0) {
288 /*
289 * We checked the header CRC. If the node has no data, adjust
290 * the space accounting now. For other nodes this will be done
291 * later either when the node is marked obsolete or when its
292 * data is checked.
293 */
294 struct jffs2_eraseblock *jeb;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100295
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100296 dbg_readinode("the node has no data.\n");
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100297 jeb = &c->blocks[ref->flash_offset / c->sector_size];
298 len = ref_totlen(c, jeb, ref);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100299
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100300 spin_lock(&c->erase_completion_lock);
301 jeb->used_size += len;
302 jeb->unchecked_size -= len;
303 c->used_size += len;
304 c->unchecked_size -= len;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100305 ref->flash_offset = ref_offset(ref) | REF_NORMAL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100306 spin_unlock(&c->erase_completion_lock);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100307 }
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100308 }
309
310 tn->fn = jffs2_alloc_full_dnode();
311 if (!tn->fn) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100312 JFFS2_ERROR("alloc fn failed\n");
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100313 ret = -ENOMEM;
314 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100315 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000316
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100317 tn->version = je32_to_cpu(rd->version);
318 tn->fn->ofs = je32_to_cpu(rd->offset);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100319 tn->data_crc = je32_to_cpu(rd->data_crc);
320 tn->csize = csize;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100321 tn->fn->raw = ref;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000322
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100323 /* There was a bug where we wrote hole nodes out with
324 csize/dsize swapped. Deal with it */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100325 if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
326 tn->fn->size = csize;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100327 else // normal case...
328 tn->fn->size = je32_to_cpu(rd->dsize);
329
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100330 dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
Artem B. Bityutskiy280562b2005-08-17 15:57:43 +0100331 ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000332
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100333 jffs2_add_tn_to_tree(tn, tnp);
334
335 return 0;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100336
337free_out:
338 jffs2_free_tmp_dnode_info(tn);
339 return ret;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100340}
341
/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an unknown node is found.
 *
 * Returns: 0 on success;
 * 	    1 if the node should be marked obsolete;
 * 	    negative error code on failure.
 */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100350static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100351{
352 /* We don't mark unknown nodes as REF_UNCHECKED */
353 BUG_ON(ref_flags(ref) == REF_UNCHECKED);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000354
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100355 un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
356
357 if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) {
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100358 /* Hmmm. This should have been caught at scan time. */
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100359 JFFS2_NOTICE("node header CRC failed at %#08x. But it must have been OK earlier.\n", ref_offset(ref));
Andrew Lunn737b7662005-07-30 16:29:30 +0100360 jffs2_dbg_dump_node(c, ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100361 return 1;
362 } else {
363 switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
364
365 case JFFS2_FEATURE_INCOMPAT:
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100366 JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
367 je16_to_cpu(un->nodetype), ref_offset(ref));
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100368 /* EEP */
369 BUG();
370 break;
371
372 case JFFS2_FEATURE_ROCOMPAT:
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100373 JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100374 je16_to_cpu(un->nodetype), ref_offset(ref));
375 BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
376 break;
377
378 case JFFS2_FEATURE_RWCOMPAT_COPY:
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100379 JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100380 je16_to_cpu(un->nodetype), ref_offset(ref));
381 break;
382
383 case JFFS2_FEATURE_RWCOMPAT_DELETE:
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100384 JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100385 je16_to_cpu(un->nodetype), ref_offset(ref));
386 return 1;
387 }
388 }
389
390 return 0;
391}
392
/*
 * Helper function for jffs2_get_inode_nodes().
 * The function detects whether more data should be read and reads it if yes.
 *
 * Returns: 0 on success;
 * 	    negative error code on failure.
 */
400static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
401 int right_size, int *rdlen, unsigned char *buf, unsigned char *bufstart)
402{
403 int right_len, err, len;
404 size_t retlen;
405 uint32_t offs;
406
407 if (jffs2_is_writebuffered(c)) {
408 right_len = c->wbuf_pagesize - (bufstart - buf);
409 if (right_size + (int)(bufstart - buf) > c->wbuf_pagesize)
410 right_len += c->wbuf_pagesize;
411 } else
412 right_len = right_size;
413
414 if (*rdlen == right_len)
415 return 0;
416
417 /* We need to read more data */
418 offs = ref_offset(ref) + *rdlen;
419 if (jffs2_is_writebuffered(c)) {
420 bufstart = buf + c->wbuf_pagesize;
421 len = c->wbuf_pagesize;
422 } else {
423 bufstart = buf + *rdlen;
424 len = right_size - *rdlen;
425 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000426
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100427 dbg_readinode("read more %d bytes\n", len);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100428
429 err = jffs2_flash_read(c, offs, len, &retlen, bufstart);
430 if (err) {
431 JFFS2_ERROR("can not read %d bytes from 0x%08x, "
432 "error code: %d.\n", len, offs, err);
433 return err;
434 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000435
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100436 if (retlen < len) {
437 JFFS2_ERROR("short read at %#08x: %d instead of %d.\n",
438 offs, retlen, len);
439 return -EIO;
440 }
441
442 *rdlen = right_len;
443
444 return 0;
445}
446
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100447/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
448 with this ino, returning the former in order of version */
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100449static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
450 struct rb_root *tnp, struct jffs2_full_dirent **fdp,
451 uint32_t *highest_version, uint32_t *latest_mctime,
452 uint32_t *mctime_ver)
453{
454 struct jffs2_raw_node_ref *ref, *valid_ref;
455 struct rb_root ret_tn = RB_ROOT;
456 struct jffs2_full_dirent *ret_fd = NULL;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100457 unsigned char *buf = NULL;
458 union jffs2_node_union *node;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100459 size_t retlen;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100460 int len, err;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100461
462 *mctime_ver = 0;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000463
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100464 dbg_readinode("ino #%u\n", f->inocache->ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100465
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100466 if (jffs2_is_writebuffered(c)) {
467 /*
468 * If we have the write buffer, we assume the minimal I/O unit
469 * is c->wbuf_pagesize. We implement some optimizations which in
470 * this case and we need a temporary buffer of size =
471 * 2*c->wbuf_pagesize bytes (see comments in read_dnode()).
472 * Basically, we want to read not only the node header, but the
473 * whole wbuf (NAND page in case of NAND) or 2, if the node
474 * header overlaps the border between the 2 wbufs.
475 */
476 len = 2*c->wbuf_pagesize;
477 } else {
478 /*
479 * When there is no write buffer, the size of the temporary
480 * buffer is the size of the larges node header.
481 */
482 len = sizeof(union jffs2_node_union);
483 }
484
485 /* FIXME: in case of NOR and available ->point() this
486 * needs to be fixed. */
487 buf = kmalloc(len, GFP_KERNEL);
488 if (!buf)
489 return -ENOMEM;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000490
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100491 spin_lock(&c->erase_completion_lock);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100492 valid_ref = jffs2_first_valid_node(f->inocache->nodes);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100493 if (!valid_ref && f->inocache->ino != 1)
494 JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100495 while (valid_ref) {
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100496 unsigned char *bufstart;
497
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100498 /* We can hold a pointer to a non-obsolete node without the spinlock,
499 but _obsolete_ nodes may disappear at any time, if the block
500 they're in gets erased. So if we mark 'ref' obsolete while we're
501 not holding the lock, it can go away immediately. For that reason,
502 we find the next valid node first, before processing 'ref'.
503 */
504 ref = valid_ref;
505 valid_ref = jffs2_first_valid_node(ref->next_in_ino);
506 spin_unlock(&c->erase_completion_lock);
507
508 cond_resched();
509
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100510 /*
511 * At this point we don't know the type of the node we're going
512 * to read, so we do not know the size of its header. In order
513 * to minimize the amount of flash IO we assume the node has
514 * size = JFFS2_MIN_NODE_HEADER.
515 */
516 if (jffs2_is_writebuffered(c)) {
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000517 /*
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100518 * We treat 'buf' as 2 adjacent wbufs. We want to
519 * adjust bufstart such as it points to the
520 * beginning of the node within this wbuf.
521 */
522 bufstart = buf + (ref_offset(ref) % c->wbuf_pagesize);
523 /* We will read either one wbuf or 2 wbufs. */
524 len = c->wbuf_pagesize - (bufstart - buf);
Artem B. Bityutskiy39243502005-08-03 10:26:50 +0100525 if (JFFS2_MIN_NODE_HEADER + (int)(bufstart - buf) > c->wbuf_pagesize) {
526 /* The header spans the border of the first wbuf */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100527 len += c->wbuf_pagesize;
528 }
529 } else {
530 bufstart = buf;
531 len = JFFS2_MIN_NODE_HEADER;
532 }
533
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100534 dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100535
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100536 /* FIXME: point() */
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100537 err = jffs2_flash_read(c, ref_offset(ref), len,
538 &retlen, bufstart);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100539 if (err) {
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100540 JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100541 goto free_out;
542 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000543
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100544 if (retlen < len) {
545 JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ref_offset(ref), retlen, len);
546 err = -EIO;
547 goto free_out;
548 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000549
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100550 node = (union jffs2_node_union *)bufstart;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000551
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100552 switch (je16_to_cpu(node->u.nodetype)) {
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000553
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100554 case JFFS2_NODETYPE_DIRENT:
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100555
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100556 if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
557 err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf, bufstart);
558 if (unlikely(err))
559 goto free_out;
560 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000561
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100562 err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100563 if (err == 1) {
564 jffs2_mark_node_obsolete(c, ref);
565 break;
566 } else if (unlikely(err))
567 goto free_out;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000568
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100569 if (je32_to_cpu(node->d.version) > *highest_version)
570 *highest_version = je32_to_cpu(node->d.version);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100571
572 break;
573
574 case JFFS2_NODETYPE_INODE:
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000575
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100576 if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
577 err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf, bufstart);
578 if (unlikely(err))
579 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100580 }
581
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100582 err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100583 if (err == 1) {
584 jffs2_mark_node_obsolete(c, ref);
585 break;
586 } else if (unlikely(err))
587 goto free_out;
588
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100589 if (je32_to_cpu(node->i.version) > *highest_version)
590 *highest_version = je32_to_cpu(node->i.version);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000591
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 break;
593
594 default:
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100595 if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
596 err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf, bufstart);
597 if (unlikely(err))
598 goto free_out;
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100599 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000600
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100601 err = read_unknown(c, ref, &node->u);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100602 if (err == 1) {
603 jffs2_mark_node_obsolete(c, ref);
604 break;
605 } else if (unlikely(err))
606 goto free_out;
607
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608 }
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100609 spin_lock(&c->erase_completion_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610 }
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100611
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100612 spin_unlock(&c->erase_completion_lock);
613 *tnp = ret_tn;
614 *fdp = ret_fd;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100615 kfree(buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700616
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100617 dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100618 f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100619 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100621 free_out:
622 jffs2_free_tmp_dnode_info_list(&ret_tn);
623 jffs2_free_full_dirent_list(ret_fd);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100624 kfree(buf);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100625 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700626}
627
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000628static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700629 struct jffs2_inode_info *f,
630 struct jffs2_raw_inode *latest_node)
631{
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100632 struct jffs2_tmp_dnode_info *tn;
David Woodhouse9dee7502005-07-05 22:03:10 +0100633 struct rb_root tn_list;
634 struct rb_node *rb, *repl_rb;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635 struct jffs2_full_dirent *fd_list;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100636 struct jffs2_full_dnode *fn, *first_fn = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700637 uint32_t crc;
638 uint32_t latest_mctime, mctime_ver;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639 size_t retlen;
640 int ret;
641
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100642 dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643
644 /* Grab all nodes relevant to this ino */
645 ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);
646
647 if (ret) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100648 JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 if (f->inocache->state == INO_STATE_READING)
650 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
651 return ret;
652 }
653 f->dents = fd_list;
654
David Woodhouse9dee7502005-07-05 22:03:10 +0100655 rb = rb_first(&tn_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700656
David Woodhouse9dee7502005-07-05 22:03:10 +0100657 while (rb) {
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100658 cond_resched();
David Woodhouse9dee7502005-07-05 22:03:10 +0100659 tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660 fn = tn->fn;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100661 ret = 1;
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100662 dbg_readinode("consider node ver %u, phys offset "
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100663 "%#08x(%d), range %u-%u.\n", tn->version,
664 ref_offset(fn->raw), ref_flags(fn->raw),
665 fn->ofs, fn->ofs + fn->size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666
667 if (fn->size) {
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100668 ret = jffs2_add_older_frag_to_fragtree(c, f, tn);
669 /* TODO: the error code isn't checked, check it */
670 jffs2_dbg_fragtree_paranoia_check_nolock(f);
671 BUG_ON(ret < 0);
672 if (!first_fn && ret == 0)
673 first_fn = fn;
674 } else if (!first_fn) {
675 first_fn = fn;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700676 f->metadata = fn;
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100677 ret = 0; /* Prevent freeing the metadata update node */
678 } else
679 jffs2_mark_node_obsolete(c, fn->raw);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000680
David Woodhouse9dee7502005-07-05 22:03:10 +0100681 BUG_ON(rb->rb_left);
David Woodhouse9dee7502005-07-05 22:03:10 +0100682 if (rb->rb_parent && rb->rb_parent->rb_left == rb) {
683 /* We were then left-hand child of our parent. We need
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100684 * to move our own right-hand child into our place. */
David Woodhouse9dee7502005-07-05 22:03:10 +0100685 repl_rb = rb->rb_right;
686 if (repl_rb)
687 repl_rb->rb_parent = rb->rb_parent;
688 } else
689 repl_rb = NULL;
690
691 rb = rb_next(rb);
692
693 /* Remove the spent tn from the tree; don't bother rebalancing
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100694 * but put our right-hand child in our own place. */
David Woodhouse9dee7502005-07-05 22:03:10 +0100695 if (tn->rb.rb_parent) {
696 if (tn->rb.rb_parent->rb_left == &tn->rb)
697 tn->rb.rb_parent->rb_left = repl_rb;
698 else if (tn->rb.rb_parent->rb_right == &tn->rb)
699 tn->rb.rb_parent->rb_right = repl_rb;
700 else BUG();
701 } else if (tn->rb.rb_right)
702 tn->rb.rb_right->rb_parent = NULL;
703
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704 jffs2_free_tmp_dnode_info(tn);
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100705 if (ret) {
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100706 dbg_readinode("delete dnode %u-%u.\n",
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100707 fn->ofs, fn->ofs + fn->size);
708 jffs2_free_full_dnode(fn);
709 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700710 }
Artem B. Bityutskiye0c8e422005-07-24 16:14:17 +0100711 jffs2_dbg_fragtree_paranoia_check_nolock(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700712
Artem B. Bityutskiy1e0da3c2005-08-01 13:05:22 +0100713 BUG_ON(first_fn && ref_obsolete(first_fn->raw));
714
715 fn = first_fn;
716 if (unlikely(!first_fn)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700717 /* No data nodes for this inode. */
718 if (f->inocache->ino != 1) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100719 JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700720 if (!fd_list) {
721 if (f->inocache->state == INO_STATE_READING)
722 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
723 return -EIO;
724 }
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100725 JFFS2_NOTICE("but it has children so we fake some modes for it\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726 }
727 latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
728 latest_node->version = cpu_to_je32(0);
729 latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
730 latest_node->isize = cpu_to_je32(0);
731 latest_node->gid = cpu_to_je16(0);
732 latest_node->uid = cpu_to_je16(0);
733 if (f->inocache->state == INO_STATE_READING)
734 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
735 return 0;
736 }
737
738 ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
739 if (ret || retlen != sizeof(*latest_node)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100740 JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
741 ret, retlen, sizeof(*latest_node));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700742 /* FIXME: If this fails, there seems to be a memory leak. Find it. */
743 up(&f->sem);
744 jffs2_do_clear_inode(c, f);
745 return ret?ret:-EIO;
746 }
747
748 crc = crc32(0, latest_node, sizeof(*latest_node)-8);
749 if (crc != je32_to_cpu(latest_node->node_crc)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100750 JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
751 f->inocache->ino, ref_offset(fn->raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700752 up(&f->sem);
753 jffs2_do_clear_inode(c, f);
754 return -EIO;
755 }
756
757 switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
758 case S_IFDIR:
759 if (mctime_ver > je32_to_cpu(latest_node->version)) {
760 /* The times in the latest_node are actually older than
761 mctime in the latest dirent. Cheat. */
762 latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
763 }
764 break;
765
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000766
Linus Torvalds1da177e2005-04-16 15:20:36 -0700767 case S_IFREG:
768 /* If it was a regular file, truncate it to the latest node's isize */
Artem B. Bityutskiyf302cd02005-07-24 16:29:59 +0100769 jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770 break;
771
772 case S_IFLNK:
773 /* Hack to work around broken isize in old symlink code.
774 Remove this when dwmw2 comes to his senses and stops
775 symlinks from being an entirely gratuitous special
776 case. */
777 if (!je32_to_cpu(latest_node->isize))
778 latest_node->isize = latest_node->dsize;
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000779
780 if (f->inocache->state != INO_STATE_CHECKING) {
781 /* Symlink's inode data is the target path. Read it and
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100782 * keep in RAM to facilitate quick follow symlink
783 * operation. */
784 f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
785 if (!f->target) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100786 JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000787 up(&f->sem);
788 jffs2_do_clear_inode(c, f);
789 return -ENOMEM;
790 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000791
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000792 ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node),
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100793 je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000794
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000795 if (ret || retlen != je32_to_cpu(latest_node->csize)) {
796 if (retlen != je32_to_cpu(latest_node->csize))
797 ret = -EIO;
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100798 kfree(f->target);
799 f->target = NULL;
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000800 up(&f->sem);
801 jffs2_do_clear_inode(c, f);
802 return -ret;
803 }
804
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100805 f->target[je32_to_cpu(latest_node->csize)] = '\0';
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100806 dbg_readinode("symlink's target '%s' cached\n", f->target);
Artem B. Bityuckiy32f1a952005-03-01 10:50:52 +0000807 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000808
Linus Torvalds1da177e2005-04-16 15:20:36 -0700809 /* fall through... */
810
811 case S_IFBLK:
812 case S_IFCHR:
813 /* Certain inode types should have only one data node, and it's
814 kept as the metadata node */
815 if (f->metadata) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100816 JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700817 f->inocache->ino, jemode_to_cpu(latest_node->mode));
818 up(&f->sem);
819 jffs2_do_clear_inode(c, f);
820 return -EIO;
821 }
822 if (!frag_first(&f->fragtree)) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100823 JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700824 f->inocache->ino, jemode_to_cpu(latest_node->mode));
825 up(&f->sem);
826 jffs2_do_clear_inode(c, f);
827 return -EIO;
828 }
829 /* ASSERT: f->fraglist != NULL */
830 if (frag_next(frag_first(&f->fragtree))) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100831 JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832 f->inocache->ino, jemode_to_cpu(latest_node->mode));
833 /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
834 up(&f->sem);
835 jffs2_do_clear_inode(c, f);
836 return -EIO;
837 }
838 /* OK. We're happy */
839 f->metadata = frag_first(&f->fragtree)->node;
840 jffs2_free_node_frag(frag_first(&f->fragtree));
841 f->fragtree = RB_ROOT;
842 break;
843 }
844 if (f->inocache->state == INO_STATE_READING)
845 jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
846
847 return 0;
848}
849
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100850/* Scan the list of all nodes present for this ino, build map of versions, etc. */
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000851int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100852 uint32_t ino, struct jffs2_raw_inode *latest_node)
853{
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100854 dbg_readinode("read inode #%u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100855
856 retry_inocache:
857 spin_lock(&c->inocache_lock);
858 f->inocache = jffs2_get_ino_cache(c, ino);
859
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100860 if (f->inocache) {
861 /* Check its state. We may need to wait before we can use it */
862 switch(f->inocache->state) {
863 case INO_STATE_UNCHECKED:
864 case INO_STATE_CHECKEDABSENT:
865 f->inocache->state = INO_STATE_READING;
866 break;
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000867
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100868 case INO_STATE_CHECKING:
869 case INO_STATE_GC:
870 /* If it's in either of these states, we need
871 to wait for whoever's got it to finish and
872 put it back. */
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100873 dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100874 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
875 goto retry_inocache;
876
877 case INO_STATE_READING:
878 case INO_STATE_PRESENT:
879 /* Eep. This should never happen. It can
880 happen if Linux calls read_inode() again
881 before clear_inode() has finished though. */
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100882 JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100883 /* Fail. That's probably better than allowing it to succeed */
884 f->inocache = NULL;
885 break;
886
887 default:
888 BUG();
889 }
890 }
891 spin_unlock(&c->inocache_lock);
892
893 if (!f->inocache && ino == 1) {
894 /* Special case - no root inode on medium */
895 f->inocache = jffs2_alloc_inode_cache();
896 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100897 JFFS2_ERROR("cannot allocate inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100898 return -ENOMEM;
899 }
Artem B. Bityutskiy733802d2005-09-22 12:25:00 +0100900 dbg_readinode("creating inocache for root inode\n");
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100901 memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
902 f->inocache->ino = f->inocache->nlink = 1;
903 f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
904 f->inocache->state = INO_STATE_READING;
905 jffs2_add_ino_cache(c, f->inocache);
906 }
907 if (!f->inocache) {
Artem B. Bityutskiye0d60132005-07-28 15:46:43 +0100908 JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino);
Artem B. Bityutskiyf97117d2005-07-27 15:46:14 +0100909 return -ENOENT;
910 }
911
912 return jffs2_do_read_inode_internal(c, f, latest_node);
913}
914
915int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
916{
917 struct jffs2_raw_inode n;
918 struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
919 int ret;
920
921 if (!f)
922 return -ENOMEM;
923
924 memset(f, 0, sizeof(*f));
925 init_MUTEX_LOCKED(&f->sem);
926 f->inocache = ic;
927
928 ret = jffs2_do_read_inode_internal(c, f, &n);
929 if (!ret) {
930 up(&f->sem);
931 jffs2_do_clear_inode(c, f);
932 }
933 kfree (f);
934 return ret;
935}
936
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
938{
939 struct jffs2_full_dirent *fd, *fds;
940 int deleted;
941
942 down(&f->sem);
943 deleted = f->inocache && !f->inocache->nlink;
944
David Woodhouse67e345d2005-02-27 23:01:36 +0000945 if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
946 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);
947
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948 if (f->metadata) {
949 if (deleted)
950 jffs2_mark_node_obsolete(c, f->metadata->raw);
951 jffs2_free_full_dnode(f->metadata);
952 }
953
954 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
955
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100956 if (f->target) {
957 kfree(f->target);
958 f->target = NULL;
959 }
Thomas Gleixner182ec4e2005-11-07 11:16:07 +0000960
Artem B. Bityutskiy2b79adc2005-07-17 12:13:51 +0100961 fds = f->dents;
962 while(fds) {
963 fd = fds;
964 fds = fd->next;
965 jffs2_free_full_dirent(fd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966 }
967
David Woodhouse67e345d2005-02-27 23:01:36 +0000968 if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700969 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
David Woodhouse67e345d2005-02-27 23:01:36 +0000970 if (f->inocache->nodes == (void *)f->inocache)
971 jffs2_del_ino_cache(c, f->inocache);
972 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973
974 up(&f->sem);
975}