[POWERPC] rheap - eliminate internal fragmentation caused by alignment

This patch returns the fragments created by rh_alloc_align() to the free
list instead of leaving them inside the allocated chunk.  Previously the
allocator padded the requested size by alignment - 1 and handed out the
whole over-sized block; now only the aligned region is taken.  This greatly
improves the utilization of memory managed by rheap.

It solves the out-of-MURAM problem seen with 3 UCCs enabled on the MPC8323.
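
As a hypothetical example: allocating 0x40 bytes aligned to 0x100 from a
free block spanning [0x1004, 0x1204) gives an aligned start of
(0x1004 + 0xFF) & ~0xFF = 0x1100.  Only [0x1100, 0x1140) is marked taken;
the leading fragment [0x1004, 0x1100) and the trailing fragment
[0x1140, 0x1204) both go back to the free list.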

Signed-off-by: Li Yang <leoli@freescale.com>
Acked-by: Joakim Tjernlund <joakim.tjernlund@transmode.se>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
index 180ee29..2f24ea0 100644
--- a/arch/powerpc/lib/rheap.c
+++ b/arch/powerpc/lib/rheap.c
@@ -437,27 +437,26 @@
 	struct list_head *l;
 	rh_block_t *blk;
 	rh_block_t *newblk;
-	unsigned long start;
+	unsigned long start, sp_size;
 
 	/* Validate size, and alignment must be power of two */
 	if (size <= 0 || (alignment & (alignment - 1)) != 0)
 		return (unsigned long) -EINVAL;
 
-	/* given alignment larger that default rheap alignment */
-	if (alignment > info->alignment)
-		size += alignment - 1;
-
 	/* Align to configured alignment */
 	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
 
-	if (assure_empty(info, 1) < 0)
+	if (assure_empty(info, 2) < 0)
 		return (unsigned long) -ENOMEM;
 
 	blk = NULL;
 	list_for_each(l, &info->free_list) {
 		blk = list_entry(l, rh_block_t, list);
-		if (size <= blk->size)
-			break;
+		if (size <= blk->size) {
+			start = (blk->start + alignment - 1) & ~(alignment - 1);
+			if (start + size <= blk->start + blk->size)
+				break;
+		}
 		blk = NULL;
 	}
 
@@ -470,25 +469,36 @@
 		list_del(&blk->list);
 		newblk = blk;
 	} else {
+		/* Alignment may create fragments; split if needed */
+		/* Create a block for the fragment at the beginning */
+		sp_size = start - blk->start;
+		if (sp_size) {
+			rh_block_t *spblk;
+
+			spblk = get_slot(info);
+			spblk->start = blk->start;
+			spblk->size = sp_size;
+			/* add it before blk in the free list */
+			list_add(&spblk->list, blk->list.prev);
+		}
 		newblk = get_slot(info);
-		newblk->start = blk->start;
+		newblk->start = start;
 		newblk->size = size;
 
-		/* blk still in free list, with updated start, size */
-		blk->start += size;
-		blk->size -= size;
+		/* blk stays in the free list, with start and size
+		 * updated for the fragment at the end */
+		blk->start = start + size;
+		blk->size -= sp_size + size;
+		/* No fragment at the end, remove blk */
+		if (blk->size == 0) {
+			list_del(&blk->list);
+			release_slot(info, blk);
+		}
 	}
 
 	newblk->owner = owner;
-	start = newblk->start;
 	attach_taken_block(info, newblk);
 
-	/* for larger alignment return fixed up pointer  */
-	/* this is no problem with the deallocator since */
-	/* we scan for pointers that lie in the blocks   */
-	if (alignment > info->alignment)
-		start = (start + alignment - 1) & ~(alignment - 1);
-
 	return start;
 }
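
For illustration, a minimal stand-alone sketch of the splitting technique
the patch introduces (toy code, not the rheap API; split_aligned() and its
parameters are hypothetical):

#include <stdio.h>

/*
 * Sketch of the split done by the patched rh_alloc_align(): a free
 * block [blk_start, blk_start + blk_size) is cut into a leading
 * fragment, the aligned allocation, and a trailing fragment.  The
 * fragments stay on the free list instead of being swallowed by the
 * allocation.  'alignment' must be a power of two.
 */
static void split_aligned(unsigned long blk_start, unsigned long blk_size,
			  unsigned long size, unsigned long alignment)
{
	unsigned long start = (blk_start + alignment - 1) & ~(alignment - 1);
	unsigned long sp_size = start - blk_start;	/* leading fragment */
	unsigned long tail;

	/* Same fitness test as the new free-list search loop. */
	if (start + size > blk_start + blk_size) {
		printf("aligned request does not fit\n");
		return;
	}
	tail = blk_size - sp_size - size;		/* trailing fragment */

	if (sp_size)
		printf("free:  [%#lx, %#lx)\n", blk_start, start);
	printf("taken: [%#lx, %#lx)\n", start, start + size);
	if (tail)
		printf("free:  [%#lx, %#lx)\n", start + size, start + size + tail);
}

int main(void)
{
	/* Same numbers as the worked example in the changelog above. */
	split_aligned(0x1004, 0x200, 0x40, 0x100);
	return 0;
}

With the pre-patch behaviour both fragments would have been part of the
taken block; here they remain allocatable, which is what makes the extra
MURAM available for the third UCC on the MPC8323.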