/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include <linux/stat.h>
#include "nodelist.h"
#include "compr.h"

static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
					  struct jffs2_inode_cache *ic,
					  struct jffs2_raw_node_ref *raw);
static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dnode *fd);
static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				      uint32_t start, uint32_t end);
static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				       uint32_t start, uint32_t end);
static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f);

/* Called with erase_completion_lock held */
static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *ret;
	struct list_head *nextlist = NULL;
	int n = jiffies % 128;

	/* Pick an eraseblock to garbage collect next. This is where we'll
	   put the clever wear-levelling algorithms. Eventually. */
	/* We possibly want to favour the dirtier blocks more when the
	   number of free blocks is low. */
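	/* With n in 0..127, the thresholds below give (when every list is
	   populated and nothing sits on bad_used_list) roughly a 50/128
	   chance of picking from erasable_list, 60/128 from very_dirty_list,
	   16/128 from dirty_list and 2/128 from clean_list. */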
again:
	if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
		D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n"));
		nextlist = &c->bad_used_list;
	} else if (n < 50 && !list_empty(&c->erasable_list)) {
		/* Note that most of them will have gone directly to be erased.
		   So don't favour the erasable_list _too_ much. */
		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n"));
		nextlist = &c->erasable_list;
	} else if (n < 110 && !list_empty(&c->very_dirty_list)) {
		/* Most of the time, pick one off the very_dirty list */
		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n"));
		nextlist = &c->very_dirty_list;
	} else if (n < 126 && !list_empty(&c->dirty_list)) {
		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n"));
		nextlist = &c->dirty_list;
	} else if (!list_empty(&c->clean_list)) {
		D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n"));
		nextlist = &c->clean_list;
	} else if (!list_empty(&c->dirty_list)) {
		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n"));

		nextlist = &c->dirty_list;
	} else if (!list_empty(&c->very_dirty_list)) {
		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"));
		nextlist = &c->very_dirty_list;
	} else if (!list_empty(&c->erasable_list)) {
		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"));

		nextlist = &c->erasable_list;
	} else if (!list_empty(&c->erasable_pending_wbuf_list)) {
		/* There are blocks waiting for the wbuf to be synced */
		D1(printk(KERN_DEBUG "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n"));
		spin_unlock(&c->erase_completion_lock);
		jffs2_flush_wbuf_pad(c);
		spin_lock(&c->erase_completion_lock);
		goto again;
	} else {
		/* Eep. All were empty */
		D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"));
		return NULL;
	}

	ret = list_entry(nextlist->next, struct jffs2_eraseblock, list);
	list_del(&ret->list);
	c->gcblock = ret;
	ret->gc_node = ret->first_node;
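	/* gc_node is our cursor through the block's list of raw node refs;
	   it is advanced as each node is garbage collected. */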
	if (!ret->gc_node) {
		printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset);
		BUG();
	}

	/* Have we accidentally picked a clean block with wasted space ? */
	if (ret->wasted_size) {
		D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size));
		ret->dirty_size += ret->wasted_size;
		c->wasted_size -= ret->wasted_size;
		c->dirty_size += ret->wasted_size;
		ret->wasted_size = 0;
	}

	return ret;
}

/* jffs2_garbage_collect_pass
 * Make a single attempt to progress GC. Move one node, and possibly
 * start erasing one eraseblock.
 */
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
{
	struct jffs2_inode_info *f;
	struct jffs2_inode_cache *ic;
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *raw;
	uint32_t gcblock_dirty;
	int ret = 0, inum, nlink;
	int xattr = 0;

	if (mutex_lock_interruptible(&c->alloc_sem))
		return -EINTR;

	for (;;) {
		spin_lock(&c->erase_completion_lock);
		if (!c->unchecked_size)
			break;

		/* We can't start doing GC yet. We haven't finished checking
		   the node CRCs etc. Do it now. */

		/* checked_ino is protected by the alloc_sem */
		if (c->checked_ino > c->highest_ino && xattr) {
			printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n",
			       c->unchecked_size);
			jffs2_dbg_dump_block_lists_nolock(c);
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->alloc_sem);
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);

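		/* The CRC-check phase is not treated as complete until the
		   xattr nodes have also been verified; see the 'xattr' test
		   in the check above. */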
		if (!xattr)
			xattr = jffs2_verify_xattr(c);

		spin_lock(&c->inocache_lock);

		ic = jffs2_get_ino_cache(c, c->checked_ino++);

		if (!ic) {
			spin_unlock(&c->inocache_lock);
			continue;
		}

		if (!ic->pino_nlink) {
			D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink/pino zero\n",
				  ic->ino));
			spin_unlock(&c->inocache_lock);
			jffs2_xattr_delete_inode(c, ic);
			continue;
		}
		switch(ic->state) {
		case INO_STATE_CHECKEDABSENT:
		case INO_STATE_PRESENT:
			D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino));
			spin_unlock(&c->inocache_lock);
			continue;

		case INO_STATE_GC:
		case INO_STATE_CHECKING:
			printk(KERN_WARNING "Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state);
			spin_unlock(&c->inocache_lock);
			BUG();

		case INO_STATE_READING:
			/* We need to wait for it to finish, lest we move on
			   and trigger the BUG() above while we haven't yet
			   finished checking all its nodes */
			D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
			/* We need to come back again for the _same_ inode. We've
			   made no progress in this case, but that should be OK */
			c->checked_ino--;

			mutex_unlock(&c->alloc_sem);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			return 0;

		default:
			BUG();

		case INO_STATE_UNCHECKED:
			;
		}
		ic->state = INO_STATE_CHECKING;
		spin_unlock(&c->inocache_lock);

		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino));

		ret = jffs2_do_crccheck_inode(c, ic);
		if (ret)
			printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino);

		jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
		mutex_unlock(&c->alloc_sem);
		return ret;
	}

	/* If there are any blocks which need erasing, erase them now */
	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list)) {
		spin_unlock(&c->erase_completion_lock);
		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() erasing pending blocks\n"));
		if (jffs2_erase_pending_blocks(c, 1)) {
			mutex_unlock(&c->alloc_sem);
			return 0;
		}
		D1(printk(KERN_DEBUG "No progress from erasing blocks; doing GC anyway\n"));
		spin_lock(&c->erase_completion_lock);
	}

	/* First, work out which block we're garbage-collecting */
	jeb = c->gcblock;

	if (!jeb)
		jeb = jffs2_find_gc_block(c);

	if (!jeb) {
		/* Couldn't find a free block. But maybe we can just erase one and make 'progress'? */
		if (c->nr_erasing_blocks) {
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->alloc_sem);
			return -EAGAIN;
		}
		D1(printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->alloc_sem);
		return -EIO;
	}

	D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size));
	D1(if (c->nextblock)
	   printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));

	if (!jeb->used_size) {
		mutex_unlock(&c->alloc_sem);
		goto eraseit;
	}

	raw = jeb->gc_node;
	gcblock_dirty = jeb->dirty_size;
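	/* Remember the block's dirty size so that, at test_gcnode below, we
	   can tell whether this pass actually obsoleted anything. */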

	while(ref_obsolete(raw)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
		raw = ref_next(raw);
		if (unlikely(!raw)) {
			printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n");
			printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
			       jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
			jeb->gc_node = raw;
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->alloc_sem);
			BUG();
		}
	}
	jeb->gc_node = raw;

	D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw)));

	if (!raw->next_in_ino) {
		/* Inode-less node. Clean marker, snapshot or something like that */
		spin_unlock(&c->erase_completion_lock);
		if (ref_flags(raw) == REF_PRISTINE) {
			/* It's an unknown node with JFFS2_FEATURE_RWCOMPAT_COPY */
			jffs2_garbage_collect_pristine(c, NULL, raw);
		} else {
			/* Just mark it obsolete */
			jffs2_mark_node_obsolete(c, raw);
		}
		mutex_unlock(&c->alloc_sem);
		goto eraseit_lock;
	}

	ic = jffs2_raw_ref_to_ic(raw);

#ifdef CONFIG_JFFS2_FS_XATTR
	/* When 'ic' refers to an xattr_datum/xattr_ref, this node is GCed as
	 * an xattr. We can decide whether it is an inode or an xattr by ic->class. */
	if (ic->class == RAWNODE_CLASS_XATTR_DATUM
	    || ic->class == RAWNODE_CLASS_XATTR_REF) {
		spin_unlock(&c->erase_completion_lock);

		if (ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw);
		} else {
			ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw);
		}
		goto test_gcnode;
	}
#endif

	/* We need to hold the inocache. Either the erase_completion_lock or
	   the inocache_lock is sufficient; we trade down since the inocache_lock
	   causes less contention. */
	spin_lock(&c->inocache_lock);

	spin_unlock(&c->erase_completion_lock);

	D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino));

	/* Three possibilities:
	   1. Inode is already in-core. We must iget it and do proper
	      updating to its fragtree, etc.
	   2. Inode is not in-core, node is REF_PRISTINE. We lock the
	      inocache to prevent a read_inode(), copy the node intact.
	   3. Inode is not in-core, node is not pristine. We must iget()
	      and take the slow path.
	*/

	switch(ic->state) {
	case INO_STATE_CHECKEDABSENT:
		/* It's been checked, but it's not currently in-core.
		   We can just copy any pristine nodes, but have
		   to prevent anyone else from doing read_inode() while
		   we're at it, so we set the state accordingly */
		if (ref_flags(raw) == REF_PRISTINE)
			ic->state = INO_STATE_GC;
		else {
			D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
				  ic->ino));
		}
		break;

	case INO_STATE_PRESENT:
		/* It's in-core. GC must iget() it. */
		break;

	case INO_STATE_UNCHECKED:
	case INO_STATE_CHECKING:
	case INO_STATE_GC:
		/* Should never happen. We should have finished checking
		   by the time we actually start doing any GC, and since
		   we're holding the alloc_sem, no other garbage collection
		   can happen.
		*/
		printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
		       ic->ino, ic->state);
		mutex_unlock(&c->alloc_sem);
		spin_unlock(&c->inocache_lock);
		BUG();

	case INO_STATE_READING:
		/* Someone's currently trying to read it. We must wait for
		   them to finish and then go through the full iget() route
		   to do the GC. However, sometimes read_inode() needs to get
		   the alloc_sem() (for marking nodes invalid) so we must
		   drop the alloc_sem before sleeping. */

		mutex_unlock(&c->alloc_sem);
		D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
			  ic->ino, ic->state));
		sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
		/* And because we dropped the alloc_sem we must start again from the
		   beginning. Ponder chance of livelock here -- we're returning success
		   without actually making any progress.

		   Q: What are the chances that the inode is back in INO_STATE_READING
		   again by the time we next enter this function? And that this happens
		   enough times to cause a real delay?

		   A: Small enough that I don't care :)
		*/
		return 0;
	}

	/* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the
	   node intact, and we don't have to muck about with the fragtree etc.
	   because we know it's not in-core. If it _was_ in-core, we go through
	   all the iget() crap anyway */

	if (ic->state == INO_STATE_GC) {
		spin_unlock(&c->inocache_lock);

		ret = jffs2_garbage_collect_pristine(c, ic, raw);

		spin_lock(&c->inocache_lock);
		ic->state = INO_STATE_CHECKEDABSENT;
		wake_up(&c->inocache_wq);

		if (ret != -EBADFD) {
			spin_unlock(&c->inocache_lock);
			goto test_gcnode;
		}

		/* Fall through if it wanted us to, with inocache_lock held */
	}

	/* Prevent the fairly unlikely race where the gcblock is
	   entirely obsoleted by the final close of a file which had
	   the only valid nodes in the block, followed by erasure,
	   followed by freeing of the ic because the erased block(s)
	   held _all_ the nodes of that inode.... never been seen but
	   it's vaguely possible. */

	inum = ic->ino;
	nlink = ic->pino_nlink;
	spin_unlock(&c->inocache_lock);
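	/* Snapshot the inode number and link count while we still hold
	   inocache_lock; jffs2_gc_fetch_inode() below may sleep. */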

	f = jffs2_gc_fetch_inode(c, inum, !nlink);
	if (IS_ERR(f)) {
		ret = PTR_ERR(f);
		goto release_sem;
	}
	if (!f) {
		ret = 0;
		goto release_sem;
	}

	ret = jffs2_garbage_collect_live(c, jeb, raw, f);

	jffs2_gc_release_inode(c, f);

 test_gcnode:
	if (jeb->dirty_size == gcblock_dirty && !ref_obsolete(jeb->gc_node)) {
		/* Eep. This really should never happen. GC is broken */
		printk(KERN_ERR "Error garbage collecting node at %08x!\n", ref_offset(jeb->gc_node));
		ret = -ENOSPC;
	}
 release_sem:
	mutex_unlock(&c->alloc_sem);

 eraseit_lock:
	/* If we've finished this block, start it erasing */
	spin_lock(&c->erase_completion_lock);

 eraseit:
	if (c->gcblock && !c->gcblock->used_size) {
		D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset));
		/* We're GC'ing an empty block? */
		list_add_tail(&c->gcblock->list, &c->erase_pending_list);
		c->gcblock = NULL;
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}
	spin_unlock(&c->erase_completion_lock);

	return ret;
}

static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dnode *fn = NULL;
	struct jffs2_full_dirent *fd;
	uint32_t start = 0, end = 0, nrfrags = 0;
	int ret = 0;

	mutex_lock(&f->sem);

	/* Now we have the lock for this inode. Check that it's still the one at the head
	   of the list. */

	spin_lock(&c->erase_completion_lock);

	if (c->gcblock != jeb) {
		spin_unlock(&c->erase_completion_lock);
		D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n"));
		goto upnout;
	}
	if (ref_obsolete(raw)) {
		spin_unlock(&c->erase_completion_lock);
		D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n"));
		/* They'll call again */
		goto upnout;
	}
	spin_unlock(&c->erase_completion_lock);

	/* OK. Looks safe. And nobody can get us now because we have the semaphore. Move the block */
	if (f->metadata && f->metadata->raw == raw) {
		fn = f->metadata;
		ret = jffs2_garbage_collect_metadata(c, jeb, f, fn);
		goto upnout;
	}

	/* FIXME. Read node and do lookup? */
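	/* Walk the fragtree to work out which byte range (start..end) of the
	   inode is still covered by the dnode we are about to GC. */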
	for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
		if (frag->node && frag->node->raw == raw) {
			fn = frag->node;
			end = frag->ofs + frag->size;
			if (!nrfrags++)
				start = frag->ofs;
			if (nrfrags == frag->node->frags)
				break; /* We've found them all */
		}
	}
	if (fn) {
		if (ref_flags(raw) == REF_PRISTINE) {
			ret = jffs2_garbage_collect_pristine(c, f->inocache, raw);
			if (!ret) {
				/* Urgh. Return it sensibly. */
				frag->node->raw = f->inocache->nodes;
			}
			if (ret != -EBADFD)
				goto upnout;
		}
		/* We found a datanode. Do the GC */
		if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
			/* It crosses a page boundary. Therefore, it must be a hole. */
			ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
		} else {
			/* It could still be a hole. But we GC the page this way anyway */
			ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end);
		}
		goto upnout;
	}

	/* Wasn't a dnode. Try dirent */
	for (fd = f->dents; fd; fd=fd->next) {
		if (fd->raw == raw)
			break;
	}

	if (fd && fd->ino) {
		ret = jffs2_garbage_collect_dirent(c, jeb, f, fd);
	} else if (fd) {
		ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd);
	} else {
		printk(KERN_WARNING "Raw node at 0x%08x wasn't in node lists for ino #%u\n",
		       ref_offset(raw), f->inocache->ino);
		if (ref_obsolete(raw)) {
			printk(KERN_WARNING "But it's obsolete so we don't mind too much\n");
		} else {
			jffs2_dbg_dump_node(c, ref_offset(raw));
			BUG();
		}
	}
 upnout:
	mutex_unlock(&f->sem);

	return ret;
}

static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
					  struct jffs2_inode_cache *ic,
					  struct jffs2_raw_node_ref *raw)
{
	union jffs2_node_union *node;
	size_t retlen;
	int ret;
	uint32_t phys_ofs, alloclen;
	uint32_t crc, rawlen;
	int retried = 0;

	D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw)));

	alloclen = rawlen = ref_totlen(c, c->gcblock, raw);

	/* Ask for a small amount of space (or the totlen if smaller) because we
	   don't want to force wastage of the end of a block if splitting would
	   work. */
	if (ic && alloclen > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)
		alloclen = sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN;

	ret = jffs2_reserve_space_gc(c, alloclen, &alloclen, rawlen);
	/* 'rawlen' is not the exact summary size; it is only an upper estimation */

	if (ret)
		return ret;

	if (alloclen < rawlen) {
		/* Doesn't fit untouched. We'll go the old route and split it */
		return -EBADFD;
	}

	node = kmalloc(rawlen, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node);
	if (!ret && retlen != rawlen)
		ret = -EIO;
	if (ret)
		goto out_node;

	crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4);
	if (je32_to_cpu(node->u.hdr_crc) != crc) {
		printk(KERN_WARNING "Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
		goto bail;
	}

	switch(je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		crc = crc32(0, node, sizeof(node->i)-8);
		if (je32_to_cpu(node->i.node_crc) != crc) {
			printk(KERN_WARNING "Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			       ref_offset(raw), je32_to_cpu(node->i.node_crc), crc);
			goto bail;
		}

		if (je32_to_cpu(node->i.dsize)) {
			crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize));
			if (je32_to_cpu(node->i.data_crc) != crc) {
				printk(KERN_WARNING "Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
				       ref_offset(raw), je32_to_cpu(node->i.data_crc), crc);
				goto bail;
			}
		}
		break;

	case JFFS2_NODETYPE_DIRENT:
		crc = crc32(0, node, sizeof(node->d)-8);
		if (je32_to_cpu(node->d.node_crc) != crc) {
			printk(KERN_WARNING "Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			       ref_offset(raw), je32_to_cpu(node->d.node_crc), crc);
			goto bail;
		}

		if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) {
			printk(KERN_WARNING "Name in dirent node at 0x%08x contains zeroes\n", ref_offset(raw));
			goto bail;
		}

		if (node->d.nsize) {
			crc = crc32(0, node->d.name, node->d.nsize);
			if (je32_to_cpu(node->d.name_crc) != crc) {
				printk(KERN_WARNING "Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
				       ref_offset(raw), je32_to_cpu(node->d.name_crc), crc);
				goto bail;
			}
		}
		break;
	default:
		/* If it's inode-less, we don't _know_ what it is. Just copy it intact */
		if (ic) {
			printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
			       ref_offset(raw), je16_to_cpu(node->u.nodetype));
			goto bail;
		}
	}

	/* OK, all the CRCs are good; this node can just be copied as-is. */
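	/* Write the node out verbatim at the current write position; if the
	   write fails we reserve fresh space and retry exactly once (see the
	   'retried' flag below). */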
 retry:
	phys_ofs = write_ofs(c);

	ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);

	if (ret || (retlen != rawlen)) {
		printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
		       rawlen, phys_ofs, ret, retlen);
		if (retlen) {
			jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL);
		} else {
			printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", phys_ofs);
		}
		if (!retried) {
			/* Try to reallocate space and retry */
			uint32_t dummy;
			struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size];

			retried = 1;

			D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n"));

			jffs2_dbg_acct_sanity_check(c,jeb);
			jffs2_dbg_acct_paranoia_check(c, jeb);

			ret = jffs2_reserve_space_gc(c, rawlen, &dummy, rawlen);
			/* this is not the exact summary size of it,
			   it is only an upper estimation */

			if (!ret) {
				D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs));

				jffs2_dbg_acct_sanity_check(c,jeb);
				jffs2_dbg_acct_paranoia_check(c, jeb);

				goto retry;
			}
			D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
		}

		if (!ret)
			ret = -EIO;
		goto out_node;
	}
	jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic);

	jffs2_mark_node_obsolete(c, raw);
	D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw)));

 out_node:
	kfree(node);
	return ret;
 bail:
	ret = -EBADFD;
	goto out_node;
}

static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					  struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
{
	struct jffs2_full_dnode *new_fn;
	struct jffs2_raw_inode ri;
	struct jffs2_node_frag *last_frag;
	union jffs2_device_node dev;
	char *mdata = NULL;
	int mdatalen = 0;
	uint32_t alloclen, ilen;
	int ret;

	if (S_ISBLK(JFFS2_F_I_MODE(f)) ||
	    S_ISCHR(JFFS2_F_I_MODE(f)) ) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f));
		mdata = (char *)&dev;
		D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen));
	} else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
		mdatalen = fn->size;
		mdata = kmalloc(fn->size, GFP_KERNEL);
		if (!mdata) {
			printk(KERN_WARNING "kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
		if (ret) {
			printk(KERN_WARNING "read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret);
			kfree(mdata);
			return ret;
		}
		D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of symlink target\n", mdatalen));

	}

	ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen,
				     JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
		       sizeof(ri)+ mdatalen, ret);
		goto out;
	}

	last_frag = frag_last(&f->fragtree);
	if (last_frag)
		/* Fetch the inode length from the fragtree rather than
		 * from i_size, since i_size may not have been updated yet */
		ilen = last_frag->ofs + last_frag->size;
	else
		ilen = JFFS2_F_I_SIZE(f);

	memset(&ri, 0, sizeof(ri));
	ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri.totlen = cpu_to_je32(sizeof(ri) + mdatalen);
	ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

	ri.ino = cpu_to_je32(f->inocache->ino);
	ri.version = cpu_to_je32(++f->highest_version);
	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
	ri.isize = cpu_to_je32(ilen);
	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
	ri.offset = cpu_to_je32(0);
	ri.csize = cpu_to_je32(mdatalen);
	ri.dsize = cpu_to_je32(mdatalen);
	ri.compr = JFFS2_COMPR_NONE;
	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
	ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));

	new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC);

	if (IS_ERR(new_fn)) {
		printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
		ret = PTR_ERR(new_fn);
		goto out;
	}
	jffs2_mark_node_obsolete(c, fn->raw);
	jffs2_free_full_dnode(fn);
	f->metadata = new_fn;
 out:
	if (S_ISLNK(JFFS2_F_I_MODE(f)))
		kfree(mdata);
	return ret;
}

static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent *new_fd;
	struct jffs2_raw_dirent rd;
	uint32_t alloclen;
	int ret;

	rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rd.nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
	rd.nsize = strlen(fd->name);
	rd.totlen = cpu_to_je32(sizeof(rd) + rd.nsize);
	rd.hdr_crc = cpu_to_je32(crc32(0, &rd, sizeof(struct jffs2_unknown_node)-4));

	rd.pino = cpu_to_je32(f->inocache->ino);
	rd.version = cpu_to_je32(++f->highest_version);
	rd.ino = cpu_to_je32(fd->ino);
	/* If the times on this inode were set by explicit utime() they can be different,
	   so refrain from splatting them. */
	if (JFFS2_F_I_MTIME(f) == JFFS2_F_I_CTIME(f))
		rd.mctime = cpu_to_je32(JFFS2_F_I_MTIME(f));
	else
		rd.mctime = cpu_to_je32(0);
	rd.type = fd->type;
	rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8));
	rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize));

	ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen,
				     JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize));
	if (ret) {
		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
		       sizeof(rd)+rd.nsize, ret);
		return ret;
	}
	new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC);

	if (IS_ERR(new_fd)) {
		printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd));
		return PTR_ERR(new_fd);
	}
	jffs2_add_fd_to_list(c, new_fd, &f->dents);
	return 0;
}

static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent **fdp = &f->dents;
	int found = 0;

	/* On a medium where we can't actually mark nodes obsolete
	   permanently, such as NAND flash, we need to work out
	   whether this deletion dirent is still needed to actively
	   delete a 'real' dirent with the same name that's still
	   somewhere else on the flash. */
	if (!jffs2_can_mark_obsolete(c)) {
		struct jffs2_raw_dirent *rd;
		struct jffs2_raw_node_ref *raw;
		int ret;
		size_t retlen;
		int name_len = strlen(fd->name);
		uint32_t name_crc = crc32(0, fd->name, name_len);
		uint32_t rawlen = ref_totlen(c, jeb, fd->raw);

		rd = kmalloc(rawlen, GFP_KERNEL);
		if (!rd)
			return -ENOMEM;

		/* Prevent the erase code from nicking the obsolete node refs while
		   we're looking at them. I really don't like this extra lock but
		   can't see any alternative. Suggestions on a postcard to... */
		mutex_lock(&c->erase_free_sem);

		for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {

			cond_resched();

			/* We only care about obsolete ones */
			if (!(ref_obsolete(raw)))
				continue;

			/* Any dirent with the same name is going to have the same length... */
			if (ref_totlen(c, NULL, raw) != rawlen)
				continue;

			/* Doesn't matter if there's one in the same erase block. We're going to
			   delete it too at the same time. */
			if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset))
				continue;

			D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw)));

			/* This is an obsolete node belonging to the same directory, and it's of the right
			   length. We need to take a closer look...*/
			ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
			if (ret) {
				printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading obsolete node at %08x\n", ret, ref_offset(raw));
				/* If we can't read it, we don't need to continue to obsolete it. Continue */
				continue;
			}
			if (retlen != rawlen) {
				printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
				       retlen, rawlen, ref_offset(raw));
				continue;
			}

			if (je16_to_cpu(rd->nodetype) != JFFS2_NODETYPE_DIRENT)
				continue;

			/* If the name CRC doesn't match, skip */
			if (je32_to_cpu(rd->name_crc) != name_crc)
				continue;

			/* If the name length doesn't match, or it's another deletion dirent, skip */
			if (rd->nsize != name_len || !je32_to_cpu(rd->ino))
				continue;

			/* OK, check the actual name now */
			if (memcmp(rd->name, fd->name, name_len))
				continue;

			/* OK. The name really does match. There really is still an older node on
			   the flash which our deletion dirent obsoletes. So we have to write out
			   a new deletion dirent to replace it */
			mutex_unlock(&c->erase_free_sem);

			D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
				  ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
			kfree(rd);

			return jffs2_garbage_collect_dirent(c, jeb, f, fd);
		}

		mutex_unlock(&c->erase_free_sem);
		kfree(rd);
	}

	/* FIXME: If we're deleting a dirent which contains the current mtime and ctime,
	   we should update the metadata node with those times accordingly */

	/* No need for it any more. Just mark it obsolete and remove it from the list */
	while (*fdp) {
		if ((*fdp) == fd) {
			found = 1;
			*fdp = fd->next;
			break;
		}
		fdp = &(*fdp)->next;
	}
	if (!found) {
		printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino);
	}
	jffs2_mark_node_obsolete(c, fd->raw);
	jffs2_free_full_dirent(fd);
	return 0;
}

static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				      uint32_t start, uint32_t end)
{
	struct jffs2_raw_inode ri;
	struct jffs2_node_frag *frag;
	struct jffs2_full_dnode *new_fn;
	uint32_t alloclen, ilen;
	int ret;

	D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
		  f->inocache->ino, start, end));

	memset(&ri, 0, sizeof(ri));

	if(fn->frags > 1) {
		size_t readlen;
		uint32_t crc;
		/* It's partially obsoleted by a later write. So we have to
		   write it out again with the _same_ version as before */
		ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
		if (readlen != sizeof(ri) || ret) {
			printk(KERN_WARNING "Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", ret, readlen);
			goto fill;
		}
		if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) {
			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
			       ref_offset(fn->raw),
			       je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
			return -EIO;
		}
		if (je32_to_cpu(ri.totlen) != sizeof(ri)) {
			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
			       ref_offset(fn->raw),
			       je32_to_cpu(ri.totlen), sizeof(ri));
			return -EIO;
		}
		crc = crc32(0, &ri, sizeof(ri)-8);
		if (crc != je32_to_cpu(ri.node_crc)) {
			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
			       ref_offset(fn->raw),
			       je32_to_cpu(ri.node_crc), crc);
			/* FIXME: We could possibly deal with this by writing new holes for each frag */
			printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
			       start, end, f->inocache->ino);
			goto fill;
		}
		if (ri.compr != JFFS2_COMPR_ZERO) {
			printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw));
			printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
			       start, end, f->inocache->ino);
			goto fill;
		}
	} else {
	fill:
		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri));
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.offset = cpu_to_je32(start);
		ri.dsize = cpu_to_je32(end - start);
		ri.csize = cpu_to_je32(0);
		ri.compr = JFFS2_COMPR_ZERO;
	}

	frag = frag_last(&f->fragtree);
	if (frag)
		/* Fetch the inode length from the fragtree rather than
		 * from i_size, since i_size may not have been updated yet */
		ilen = frag->ofs + frag->size;
	else
		ilen = JFFS2_F_I_SIZE(f);

	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
	ri.isize = cpu_to_je32(ilen);
	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
	ri.data_crc = cpu_to_je32(0);
	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));

	ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen,
				     JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
		       sizeof(ri), ret);
		return ret;
	}
	new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC);

	if (IS_ERR(new_fn)) {
		printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn));
		return PTR_ERR(new_fn);
	}
	if (je32_to_cpu(ri.version) == f->highest_version) {
		jffs2_add_full_dnode_to_inode(c, f, new_fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		return 0;
	}

	/*
	 * We should only get here in the case where the node we are
	 * replacing had more than one frag, so we kept the same version
	 * number as before. (Except in case of error -- see 'goto fill;'
	 * above.)
	 */
	D1(if(unlikely(fn->frags <= 1)) {
		printk(KERN_WARNING "jffs2_garbage_collect_hole: Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
		       fn->frags, je32_to_cpu(ri.version), f->highest_version,
		       je32_to_cpu(ri.ino));
	});

	/* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
	mark_ref_normal(new_fn->raw);

	for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs);
	     frag; frag = frag_next(frag)) {
		if (frag->ofs > fn->size + fn->ofs)
			break;
		if (frag->node == fn) {
			frag->node = new_fn;
			new_fn->frags++;
			fn->frags--;
		}
	}
	if (fn->frags) {
		printk(KERN_WARNING "jffs2_garbage_collect_hole: Old node still has frags!\n");
		BUG();
	}
	if (!new_fn->frags) {
		printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n");
		BUG();
	}

	jffs2_mark_node_obsolete(c, fn->raw);
	jffs2_free_full_dnode(fn);

	return 0;
}

static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *orig_jeb,
				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				       uint32_t start, uint32_t end)
{
	struct jffs2_full_dnode *new_fn;
	struct jffs2_raw_inode ri;
	uint32_t alloclen, offset, orig_end, orig_start;
	int ret = 0;
	unsigned char *comprbuf = NULL, *writebuf;
	unsigned long pg;
	unsigned char *pg_ptr;

	memset(&ri, 0, sizeof(ri));

	D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
		  f->inocache->ino, start, end));

	orig_end = end;
	orig_start = start;

	if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) {
		/* Attempt to do some merging. But only expand to cover logically
		   adjacent frags if the block containing them is already considered
		   to be dirty. Otherwise we end up with GC just going round in
		   circles dirtying the nodes it already wrote out, especially
		   on NAND where we have small eraseblocks and hence a much higher
		   chance of nodes having to be split to cross boundaries. */
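		/* min/max below bound the expansion to the single page-cache
		   page that contains 'start'. */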

		struct jffs2_node_frag *frag;
		uint32_t min, max;

		min = start & ~(PAGE_CACHE_SIZE-1);
		max = min + PAGE_CACHE_SIZE;

		frag = jffs2_lookup_node_frag(&f->fragtree, start);

		/* BUG_ON(!frag) but that'll happen anyway... */

		BUG_ON(frag->ofs != start);

		/* First grow down... */
		while((frag = frag_prev(frag)) && frag->ofs >= min) {

			/* If the previous frag doesn't even reach the beginning, there's
			   excessive fragmentation. Just merge. */
			if (frag->ofs > min) {
				D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n",
					  frag->ofs, frag->ofs+frag->size));
				start = frag->ofs;
				continue;
			}
			/* OK. This frag holds the first byte of the page. */
			if (!frag->node || !frag->node->raw) {
				D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
					  frag->ofs, frag->ofs+frag->size));
				break;
			} else {

				/* OK, it's a frag which extends to the beginning of the page. Does it live
				   in a block which is still considered clean? If so, don't obsolete it.
				   If not, cover it anyway. */

				struct jffs2_raw_node_ref *raw = frag->node->raw;
				struct jffs2_eraseblock *jeb;

				jeb = &c->blocks[raw->flash_offset / c->sector_size];

				if (jeb == c->gcblock) {
					D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
						  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
					start = frag->ofs;
					break;
				}
				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
					D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
						  frag->ofs, frag->ofs+frag->size, jeb->offset));
					break;
				}

				D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
					  frag->ofs, frag->ofs+frag->size, jeb->offset));
				start = frag->ofs;
				break;
			}
		}

		/* ... then up */

		/* Find last frag which is actually part of the node we're to GC. */
		frag = jffs2_lookup_node_frag(&f->fragtree, end-1);

		while((frag = frag_next(frag)) && frag->ofs+frag->size <= max) {

			/* If the next frag doesn't even reach the end of the page, there's lots
			   of fragmentation. Just merge. */
			if (frag->ofs+frag->size < max) {
				D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n",
					  frag->ofs, frag->ofs+frag->size));
				end = frag->ofs + frag->size;
				continue;
			}

			if (!frag->node || !frag->node->raw) {
				D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
					  frag->ofs, frag->ofs+frag->size));
				break;
			} else {

				/* OK, it's a frag which extends to the end of the page. Does it live
				   in a block which is still considered clean? If so, don't obsolete it.
				   If not, cover it anyway. */

				struct jffs2_raw_node_ref *raw = frag->node->raw;
				struct jffs2_eraseblock *jeb;

				jeb = &c->blocks[raw->flash_offset / c->sector_size];

				if (jeb == c->gcblock) {
					D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
						  frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
					end = frag->ofs + frag->size;
					break;
				}
				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
					D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
						  frag->ofs, frag->ofs+frag->size, jeb->offset));
					break;
				}

				D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
					  frag->ofs, frag->ofs+frag->size, jeb->offset));
				end = frag->ofs + frag->size;
				break;
			}
		}
		D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
			  orig_start, orig_end, start, end));

		D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size));
		BUG_ON(end < orig_end);
		BUG_ON(start > orig_start);
	}

	/* First, use readpage() to read the appropriate page into the page cache */
	/* Q: What happens if we actually try to GC the _same_ page for which commit_write()
	 *    triggered garbage collection in the first place?
	 * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
	 *    page OK. We'll actually write it out again in commit_write, which is a little
	 *    suboptimal, but at least we're correct.
	 */
	pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);

	if (IS_ERR(pg_ptr)) {
		printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg_ptr));
		return PTR_ERR(pg_ptr);
	}

	offset = start;
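	/* Write the (possibly expanded) range back out as one or more new
	   dnodes, compressing each chunk before it is written. */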
	while(offset < orig_end) {
		uint32_t datalen;
		uint32_t cdatalen;
		uint16_t comprtype = JFFS2_COMPR_NONE;

		ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN,
					&alloclen, JFFS2_SUMMARY_INODE_SIZE);

		if (ret) {
			printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
			       sizeof(ri)+ JFFS2_MIN_DATA_LEN, ret);
			break;
		}
		cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
		datalen = end - offset;

		writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));

		comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);

		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri) + cdatalen);
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
		ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
		ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
		ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
		ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
		ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
		ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
		ri.offset = cpu_to_je32(offset);
		ri.csize = cpu_to_je32(cdatalen);
		ri.dsize = cpu_to_je32(datalen);
		ri.compr = comprtype & 0xff;
		ri.usercompr = (comprtype >> 8) & 0xff;
		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
		ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));

		new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, ALLOC_GC);

		jffs2_free_comprbuf(comprbuf, writebuf);

		if (IS_ERR(new_fn)) {
			printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
			ret = PTR_ERR(new_fn);
			break;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, new_fn);
		offset += datalen;
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
	}

	jffs2_gc_release_page(c, pg_ptr, &pg);
	return ret;
}