[LogFS] Prevent memory corruption on large deletes

Removing a sufficiently large file creates aliases for a large number
of segments.  This in turn results in a large number of journal
entries and an overflow of s_je_array.

The cheap fix is to add a BUG_ON, turning the memory corruption into
something annoying, but less dangerous.  The real fix is to count the
number of affected segments and prevent the problem completely.
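
For illustration, a minimal sketch of how the counter could gate
further writes; journal_has_room() and MAX_JOURNAL_ENTRIES are
placeholder names here, not actual logfs interfaces:

	/* Sketch only: one journal alias entry is needed per shadowed
	 * segment, so refuse new shadows once the journal entry array
	 * would overflow. */
	static int journal_has_room(struct shadow_tree *tree)
	{
		return tree->no_shadowed_segments < MAX_JOURNAL_ENTRIES;
	}

A caller would test journal_has_room() and write out the journal
before marking any further segments.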

Signed-off-by: Joern Engel <joern@logfs.org>
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 3659c37..7e0c39c 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1219,6 +1219,22 @@
 	mempool_free(shadow, super->s_shadow_pool);
 }
 
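+/*
+ * Remember that @segno contains shadowed objects.  Each distinct
+ * segment needs one alias entry in the journal, so count them.
+ */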
+static void mark_segment(struct shadow_tree *tree, u32 segno)
+{
+	int err;
+
+	if (!btree_lookup32(&tree->segment_map, segno)) {
+		err = btree_insert32(&tree->segment_map, segno, (void *)1,
+				GFP_NOFS);
+		BUG_ON(err);
+		tree->no_shadowed_segments++;
+	}
+}
+
 /**
  * fill_shadow_tree - Propagate shadow tree changes due to a write
  * @inode:	Inode owning the page
@@ -1266,6 +1282,9 @@
 
 		super->s_dirty_used_bytes += shadow->new_len;
 		super->s_dirty_free_bytes += shadow->old_len;
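+		/* mark both the old and the new segment as shadowed */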
+		mark_segment(tree, shadow->old_ofs >> super->s_segshift);
+		mark_segment(tree, shadow->new_ofs >> super->s_segshift);
 	}
 }