gpu: ion: Use the ion_page_pool from the system heap

With this change the system heap draws uncached allocations from per-order
page pools instead of going to the page allocator every time. Pooled pages
are already zeroed and kept out of the CPU cache, so they do not have to be
invalidated again when they are handed out, which is a significant
performance improvement on some systems.

Change-Id: I2edc9fb401b458f2d573da2d5ceb1a3238df76b9
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
Git-commit: 520cd0b06527c14c6ded903d0c8299652098afec
Git-repo: https://android.googlesource.com/kernel/common
[lauraa@codeaurora.org: Resolved conflicts in ion.c and ion_system_heap.c
due to debugfs differences; dropped all of the debugfs code for now until
we can sync back up. Also resolved conflicts in ion_priv.h due to CMA
differences.]
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
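
Below is a rough sketch (illustrative only, not part of the patch) of how the
system heap is expected to drive the ion_page_pool API: uncached allocations
are satisfied from a per-order pool and recycled back into it on free, while
cached allocations continue to go through alloc_pages()/__free_pages().  The
names (struct ion_system_heap, pools, order_to_index(), ion_page_pool_alloc(),
ion_page_pool_free(), ion_buffer_cached()) follow the code in the diff; the
example_* wrappers are made up purely for illustration.

	/* Uncached buffers hit the per-order pool; pooled pages are already
	 * zeroed and kept out of the CPU cache, so nothing has to be
	 * invalidated at allocation time. */
	static struct page *example_alloc(struct ion_system_heap *heap,
					  struct ion_buffer *buffer,
					  unsigned int order)
	{
		if (!ion_buffer_cached(buffer))
			return ion_page_pool_alloc(
				heap->pools[order_to_index(order)]);

		return alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
				   __GFP_NOWARN | __GFP_NORETRY, order);
	}

	/* On free, uncached pages are scrubbed through a write-combined
	 * mapping (see free_buffer_page() below) and returned to the pool
	 * rather than to the page allocator. */
	static void example_free(struct ion_system_heap *heap,
				 struct ion_buffer *buffer,
				 struct page *page, unsigned int order)
	{
		if (!ion_buffer_cached(buffer)) {
			ion_page_pool_free(heap->pools[order_to_index(order)],
					   page);
			return;
		}
		__free_pages(page, order);
	}

Cached buffers bypass the pools entirely, since the pools' whole purpose is
to keep pages out of the CPU cache.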
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index 4094631..ff70ce7 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -34,34 +34,106 @@
 static atomic_t system_heap_allocated;
 static atomic_t system_contig_heap_allocated;
 
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+static int order_to_index(unsigned int order)
+{
+	int i;
+	for (i = 0; i < num_orders; i++)
+		if (order == orders[i])
+			return i;
+	BUG();
+	return -1;
+}
+
+static unsigned int order_to_size(int order)
+{
+	return PAGE_SIZE << order;
+}
+
+struct ion_system_heap {
+	struct ion_heap heap;
+	struct ion_page_pool **pools;
+};
+
 struct page_info {
 	struct page *page;
-	unsigned long order;
+	unsigned int order;
 	struct list_head list;
 };
 
-static unsigned int orders[] = {8, 4, 0};
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+				      struct ion_buffer *buffer,
+				      unsigned long order)
+{
+	bool cached = ion_buffer_cached(buffer);
+	bool split_pages = ion_buffer_fault_user_mappings(buffer);
+	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+	struct page *page;
 
-static struct page_info *alloc_largest_available(unsigned long size,
-						 bool split_pages,
+	if (!cached)
+		page = ion_page_pool_alloc(pool);
+	else
+		page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
+				   __GFP_NOWARN | __GFP_NORETRY, order);
+	if (!page)
+		return NULL;
+	if (split_pages)
+		split_page(page, order);
+	return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+			     struct ion_buffer *buffer, struct page *page,
+			     unsigned int order)
+{
+	bool cached = ion_buffer_cached(buffer);
+	bool split_pages = ion_buffer_fault_user_mappings(buffer);
+	int i;
+
+	if (!cached) {
+		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+		/* Zero the pages before returning them to the pool, for
+		 * security.  vmap is used so the pgprot can be set to a
+		 * non-cached mapping for the writes; the pool's purpose
+		 * is to keep these pages out of the cache. */
+		for (i = 0; i < (1 << order); i++) {
+			struct page *sub_page = page + i;
+			void *addr = vmap(&sub_page, 1, VM_MAP,
+					  pgprot_writecombine(PAGE_KERNEL));
+			memset(addr, 0, PAGE_SIZE);
+			vunmap(addr);
+		}
+		ion_page_pool_free(pool, page);
+	} else if (split_pages) {
+		for (i = 0; i < (1 << order); i++)
+			__free_page(page + i);
+	} else {
+		__free_pages(page, order);
+	}
+}
+
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+						 struct ion_buffer *buffer,
+						 unsigned long size,
 						 unsigned int max_order)
 {
 	struct page *page;
 	struct page_info *info;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(orders); i++) {
-		if (size < (1 << orders[i]) * PAGE_SIZE)
+	for (i = 0; i < num_orders; i++) {
+		if (size < order_to_size(orders[i]))
 			continue;
 		if (max_order < orders[i])
 			continue;
-		page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
-				   __GFP_NOWARN | __GFP_NORETRY, orders[i]);
+
+		page = alloc_buffer_page(heap, buffer, orders[i]);
 		if (!page)
 			continue;
-		if (split_pages)
-			split_page(page, orders[i]);
-		info = kmalloc(sizeof(struct page_info *), GFP_KERNEL);
+
+		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
 		info->page = page;
 		info->order = orders[i];
 		return info;
@@ -74,6 +146,9 @@
 				     unsigned long size, unsigned long align,
 				     unsigned long flags)
 {
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
 	struct sg_table *table;
 	struct scatterlist *sg;
 	int ret;
@@ -81,15 +156,12 @@
 	struct page_info *info, *tmp_info;
 	int i = 0;
 	long size_remaining = PAGE_ALIGN(size);
-	bool split_pages = ion_buffer_fault_user_mappings(buffer);
-
-
 	unsigned int max_order = orders[0];
+	bool split_pages = ion_buffer_fault_user_mappings(buffer);
 
 	INIT_LIST_HEAD(&pages);
 	while (size_remaining > 0) {
-		info = alloc_largest_available(size_remaining, split_pages,
-					       max_order);
+		info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
 		if (!info)
 			goto err;
 		list_add_tail(&info->list, &pages);
@@ -114,7 +186,6 @@
 	sg = table->sgl;
 	list_for_each_entry_safe(info, tmp_info, &pages, list) {
 		struct page *page = info->page;
-
 		if (split_pages) {
 			for (i = 0; i < (1 << info->order); i++) {
 				sg_set_page(sg, page + i, PAGE_SIZE, 0);
@@ -139,12 +210,7 @@
 	kfree(table);
 err:
 	list_for_each_entry(info, &pages, list) {
-		if (split_pages)
-			for (i = 0; i < (1 << info->order); i++)
-				__free_page(info->page + i);
-		else
-			__free_pages(info->page, info->order);
-
+		free_buffer_page(sys_heap, buffer, info->page, info->order);
 		kfree(info);
 	}
 	return -ENOMEM;
@@ -152,15 +218,19 @@
 
 void ion_system_heap_free(struct ion_buffer *buffer)
 {
-	int i;
-	struct scatterlist *sg;
+	struct ion_heap *heap = buffer->heap;
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
 	struct sg_table *table = buffer->priv_virt;
+	struct scatterlist *sg;
+	LIST_HEAD(pages);
+	int i;
 
 	for_each_sg(table->sgl, sg, table->nents, i)
-		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
-	if (buffer->sg_table)
-		sg_free_table(buffer->sg_table);
-	kfree(buffer->sg_table);
+		free_buffer_page(sys_heap, buffer, sg_page(sg), get_order(sg_dma_len(sg)));
+	sg_free_table(table);
+	kfree(table);
 	atomic_sub(buffer->size, &system_heap_allocated);
 }
 
@@ -253,7 +323,7 @@
 	return 0;
 }
 
-static struct ion_heap_ops vmalloc_ops = {
+static struct ion_heap_ops system_heap_ops = {
 	.allocate = ion_system_heap_allocate,
 	.free = ion_system_heap_free,
 	.map_dma = ion_system_heap_map_dma,
@@ -266,19 +336,49 @@
 
 struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
 {
-	struct ion_heap *heap;
+	struct ion_system_heap *heap;
+	int i;
 
-	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
 	if (!heap)
 		return ERR_PTR(-ENOMEM);
-	heap->ops = &vmalloc_ops;
-	heap->type = ION_HEAP_TYPE_SYSTEM;
-	return heap;
+	heap->heap.ops = &system_heap_ops;
+	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+			      GFP_KERNEL);
+	if (!heap->pools)
+		goto err_alloc_pools;
+	for (i = 0; i < num_orders; i++) {
+		struct ion_page_pool *pool;
+		pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO |
+					   __GFP_NOWARN | __GFP_NORETRY,
+					   orders[i]);
+		if (!pool)
+			goto err_create_pool;
+		heap->pools[i] = pool;
+	}
+	return &heap->heap;
+err_create_pool:
+	for (i = 0; i < num_orders; i++)
+		if (heap->pools[i])
+			ion_page_pool_destroy(heap->pools[i]);
+	kfree(heap->pools);
+err_alloc_pools:
+	kfree(heap);
+	return ERR_PTR(-ENOMEM);
 }
 
 void ion_system_heap_destroy(struct ion_heap *heap)
 {
-	kfree(heap);
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
+	int i;
+
+	for (i = 0; i < num_orders; i++)
+		ion_page_pool_destroy(sys_heap->pools[i]);
+	kfree(sys_heap->pools);
+	kfree(sys_heap);
 }
 
 static int ion_system_contig_heap_allocate(struct ion_heap *heap,