/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.122 2005/05/06 09:30:27 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @ofs: Returned value of node offset
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
 * or other error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by
 * jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret;
			uint32_t dirty, avail;

			/* Calculate the real dirty size.
			 * dirty_size contains blocks on the erase_pending_list;
			 * those blocks are counted in c->nr_erasing_blocks.
			 * Once a block is actually erased, it is no longer counted as dirty_space,
			 * but it is still counted in c->nr_erasing_blocks, so we include erasing_size
			 * and subtract c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on the erasable_list are counted as dirty_size, but not in
			 * c->nr_erasing_blocks. This helps us to force gc and eventually pick a
			 * clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use
			 * there. This will affect the sum only once, since gc finishes checking
			 * nodes first.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. "Possibly available" means that we
			 * don't know whether unchecked size contains obsoleted nodes, which could
			 * give us some more usable space. This will affect the sum only once,
			 * since gc finishes checking nodes first.
			 * Return -ENOSPC if the maximum possibly available space is less than or
			 * equal to blocksneeded * sector_size.
			 * This blocks endless gc looping on a nearly full filesystem, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}
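
/*
 * Illustrative sketch only (not part of the original file, deliberately
 * compiled out): a minimal example of the calling protocol around
 * jffs2_reserve_space(). The helper write_my_node() is hypothetical;
 * real callers live in write.c and also report the written node with
 * jffs2_add_physical_node_ref() before completing the reservation.
 */
#if 0
static int example_reserve_and_write(struct jffs2_sb_info *c, uint32_t want)
{
	uint32_t ofs, len;
	int ret;

	ret = jffs2_reserve_space(c, want, &ofs, &len, ALLOC_NORMAL);
	if (ret)
		return ret;	/* e.g. -ENOSPC or -EINTR; alloc_sem is NOT held */

	/* alloc_sem is held here. Write at most 'len' bytes at offset 'ofs'. */
	ret = write_my_node(c, ofs, len);	/* hypothetical helper */

	jffs2_complete_reservation(c);	/* releases alloc_sem, may kick GC */
	return ret;
}
#endif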

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	return ret;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	struct jffs2_eraseblock *jeb = c->nextblock;

 restart:
	if (jeb && minsize > jeb->free_size) {
		/* Skip the end of this block and file it as having some dirty space */
		/* If there's a pending write to it, flush now */
		if (jffs2_wbuf_dirty(c)) {
			spin_unlock(&c->erase_completion_lock);
			D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			jeb = c->nextblock;
			goto restart;
		}
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->wasted_size += jeb->free_size;
		jeb->free_size = 0;

		/* Check if we have a dirty block now, or if it was dirty already */
		if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
			c->dirty_size += jeb->wasted_size;
			c->wasted_size -= jeb->wasted_size;
			jeb->dirty_size += jeb->wasted_size;
			jeb->wasted_size = 0;
			if (VERYDIRTY(c, jeb->dirty_size)) {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
					  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->very_dirty_list);
			} else {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
					  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->dirty_list);
			}
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->clean_list);
		}
		c->nextblock = jeb = NULL;
	}

	if (!jeb) {
		struct list_head *next;
		/* Take the next block off the 'free' list */

		if (list_empty(&c->free_list)) {

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_list)) {
				struct jffs2_eraseblock *ejeb;

				ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
				list_del(&ejeb->list);
				list_add_tail(&ejeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
					  ejeb->offset));
			}

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_pending_wbuf_list)) {
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				/* c->nextblock is NULL, no update to c->nextblock allowed */
				spin_unlock(&c->erase_completion_lock);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				/* Have another go. It'll be on the erasable_list now */
				return -EAGAIN;
			}

			if (!c->nr_erasing_blocks) {
				/* Ouch. We're in GC, or we wouldn't have got here.
				   And there's no space left. At all. */
				printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
				return -ENOSPC;
			}

			spin_unlock(&c->erase_completion_lock);
			/* Don't wait for it; just erase one right now */
			jffs2_erase_pending_blocks(c, 1);
			spin_lock(&c->erase_completion_lock);

			/* An erase may have failed, decreasing the
			   amount of free space available. So we must
			   restart from the beginning */
			return -EAGAIN;
		}

		next = c->free_list.next;
		list_del(next);
		c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
		c->nr_free_blocks--;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}
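
/*
 * Worked example for the offset arithmetic above (illustrative numbers,
 * not from the original source): with a 64KiB sector at jeb->offset
 * 0x40000 of which 0x1000 bytes are already accounted for, jeb->free_size
 * is 0xf000, so the caller is handed
 *	*ofs = 0x40000 + (0x10000 - 0xf000) = 0x41000
 *	*len = 0xf000
 */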

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @new: new node reference to add
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */

int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
	struct jffs2_eraseblock *jeb;
	uint32_t len;

	jeb = &c->blocks[new->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, new);

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
	/* we could get some obsolete nodes after nextblock was refiled
	   in wbuf.c */
	if ((c->nextblock || !ref_obsolete(new)) &&
	    (jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		jffs2_free_raw_node_ref(new);
		return -EINVAL;
	}
#endif
	spin_lock(&c->erase_completion_lock);

	if (!jeb->first_node)
		jeb->first_node = new;
	if (jeb->last_node)
		jeb->last_node->next_phys = new;
	jeb->last_node = new;

	jeb->free_size -= len;
	c->free_size -= len;
	if (ref_obsolete(new)) {
		jeb->dirty_size += len;
		c->dirty_size += len;
	} else {
		jeb->used_size += len;
		c->used_size += len;
	}

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	ACCT_SANITY_CHECK(c, jeb);
	D1(ACCT_PARANOIA_CHECK(jeb));

	spin_unlock(&c->erase_completion_lock);

	return 0;
}
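
/*
 * Accounting note (illustrative; inferred from ACCT_SANITY_CHECK rather than
 * stated in this file): for each eraseblock the size counters are maintained
 * so that
 *
 *	used_size + dirty_size + wasted_size + unchecked_size + free_size
 *		== c->sector_size
 *
 * and the per-filesystem totals sum towards c->flash_size the same way. The
 * adjustments above preserve this: 'len' leaves free_size and lands in
 * either used_size or dirty_size.
 */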

void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;

	if (!ref) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
		c->unchecked_size -= ref_totlen(c, jeb, ref);
	} else {
		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->used_size -= ref_totlen(c, jeb, ref);
		c->used_size -= ref_totlen(c, jeb, ref);
	}

	/* Take care that the wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
		D1(printk(KERN_DEBUG "Dirtying\n"));
		addedsize = ref_totlen(c, jeb, ref);
		jeb->dirty_size += ref_totlen(c, jeb, ref);
		c->dirty_size += ref_totlen(c, jeb, ref);

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk(KERN_DEBUG "Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += ref_totlen(c, jeb, ref);
		c->wasted_size += ref_totlen(c, jeb, ref);
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	ACCT_SANITY_CHECK(c, jeb);

	D1(ACCT_PARANOIA_CHECK(jeb));

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		if (ic->nodes == (void *)ic && ic->nlink == 0)
			jffs2_del_ino_cache(c, ic);

		spin_unlock(&c->erase_completion_lock);
	}


	/* Merge with the next node in the physical list, if there is one
	   and if it's also obsolete and if it doesn't belong to any inode */
	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
	    !ref->next_phys->next_in_ino) {
		struct jffs2_raw_node_ref *n = ref->next_phys;

		spin_lock(&c->erase_completion_lock);

		ref->__totlen += n->__totlen;
		ref->next_phys = n->next_phys;
		if (jeb->last_node == n)
			jeb->last_node = ref;
		if (jeb->gc_node == n) {
			/* gc will be happy continuing gc on this node */
			jeb->gc_node = ref;
		}
		spin_unlock(&c->erase_completion_lock);

		jffs2_free_raw_node_ref(n);
	}

	/* Also merge with the previous node in the list, if there is one
	   and that one is obsolete */
	if (ref != jeb->first_node) {
		struct jffs2_raw_node_ref *p = jeb->first_node;

		spin_lock(&c->erase_completion_lock);

		while (p->next_phys != ref)
			p = p->next_phys;

		if (ref_obsolete(p) && !ref->next_in_ino) {
			p->__totlen += ref->__totlen;
			if (jeb->last_node == ref) {
				jeb->last_node = p;
			}
			if (jeb->gc_node == ref) {
				/* gc will be happy continuing gc on this node */
				jeb->gc_node = p;
			}
			p->next_phys = ref->next_phys;
			jffs2_free_raw_node_ref(ref);
		}
		spin_unlock(&c->erase_completion_lock);
	}
 out_erase_sem:
	up(&c->erase_free_sem);
}

#if CONFIG_JFFS2_FS_DEBUG >= 2
void jffs2_dump_block_lists(struct jffs2_sb_info *c)
{
	printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
	printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
	printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
	printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
	printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
	printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
	printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
	printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
	printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
	printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
	printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n", c->sector_size * c->resv_blocks_write);

	if (c->nextblock) {
		printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
		       c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
	} else {
		printk(KERN_DEBUG "nextblock: NULL\n");
	}
	if (c->gcblock) {
		printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
		       c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
	} else {
		printk(KERN_DEBUG "gcblock: NULL\n");
	}
	if (list_empty(&c->clean_list)) {
		printk(KERN_DEBUG "clean_list: empty\n");
	} else {
		struct list_head *this;
		int numblocks = 0;
		uint32_t dirty = 0;

		list_for_each(this, &c->clean_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			numblocks++;
			dirty += jeb->wasted_size;
			printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
		printk(KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
	}
	if (list_empty(&c->very_dirty_list)) {
		printk(KERN_DEBUG "very_dirty_list: empty\n");
	} else {
		struct list_head *this;
		int numblocks = 0;
		uint32_t dirty = 0;

		list_for_each(this, &c->very_dirty_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			numblocks++;
			dirty += jeb->dirty_size;
			printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
		printk(KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
		       numblocks, dirty, dirty / numblocks);
	}
	if (list_empty(&c->dirty_list)) {
		printk(KERN_DEBUG "dirty_list: empty\n");
	} else {
		struct list_head *this;
		int numblocks = 0;
		uint32_t dirty = 0;

		list_for_each(this, &c->dirty_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			numblocks++;
			dirty += jeb->dirty_size;
			printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
		printk(KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
		       numblocks, dirty, dirty / numblocks);
	}
	if (list_empty(&c->erasable_list)) {
		printk(KERN_DEBUG "erasable_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erasable_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->erasing_list)) {
		printk(KERN_DEBUG "erasing_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erasing_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->erase_pending_list)) {
		printk(KERN_DEBUG "erase_pending_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erase_pending_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->erasable_pending_wbuf_list)) {
		printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erasable_pending_wbuf_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->free_list)) {
		printk(KERN_DEBUG "free_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->free_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->bad_list)) {
		printk(KERN_DEBUG "bad_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->bad_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->bad_used_list)) {
		printk(KERN_DEBUG "bad_used_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->bad_used_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
}
#endif /* CONFIG_JFFS2_FS_DEBUG */

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on the erase_pending_list;
	 * those blocks are counted in c->nr_erasing_blocks.
	 * Once a block is actually erased, it is no longer counted as dirty_space,
	 * but it is still counted in c->nr_erasing_blocks, so we include erasing_size
	 * and subtract c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on the erasable_list are counted as dirty_size, but not in
	 * c->nr_erasing_blocks. This helps us to force gc and eventually pick a
	 * clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}
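
/*
 * Worked example for the trigger above (illustrative numbers, not from the
 * original source): with resv_blocks_gctrigger == 6, nr_free_blocks == 3 and
 * nr_erasing_blocks == 1, the block-count check holds (3 + 1 < 6); if the
 * computed dirty figure also exceeds nospc_dirty_size, the GC thread is
 * woken to start reclaiming space.
 */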