gpu: ion: Modify zeroing code so it only allocates address space once
vmap/vunmap spend a significant amount of time allocating the
address space to map into.  Rather than allocating address space
for each page, allocate it once for the entire allocation and
then just map and unmap each page into that address space.
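
For reference, the pattern the patch moves to looks roughly like the
sketch below (illustration only, not part of this patch).  It assumes
the vmalloc API of the kernels this driver targets, where map_vm_area()
takes a struct page ***; the helper name zero_pages_wc() and the use of
get_vm_area(size, flags) with VM_ALLOC are invented here for
illustration.

	#include <linux/mm.h>
	#include <linux/string.h>
	#include <linux/vmalloc.h>

	/* Zero a set of pages through one reusable, write-combined window. */
	static int zero_pages_wc(struct page **pages, int npages)
	{
		struct vm_struct *vm_struct;
		int i;

		/* Reserve one page of kernel address space up front. */
		vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
		if (!vm_struct)
			return -ENOMEM;

		for (i = 0; i < npages; i++) {
			struct page *page = pages[i];
			struct page **tmp = &page;
			int ret;

			/* Point the reserved window at this page. */
			ret = map_vm_area(vm_struct,
					  pgprot_writecombine(PAGE_KERNEL),
					  &tmp);
			if (ret) {
				free_vm_area(vm_struct);
				return ret;
			}
			memset(vm_struct->addr, 0, PAGE_SIZE);
			/* Drop the mapping but keep the address space. */
			unmap_kernel_range((unsigned long)vm_struct->addr,
					   PAGE_SIZE);
		}

		free_vm_area(vm_struct);
		return 0;
	}

The address-space reservation, which is the expensive part of vmap(),
is paid once per operation; the per-page cost is reduced to installing
and tearing down a single mapping.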
Change-Id: I59fe2c0c5d0b43f5197bba7570157dc3949c8d88
Signed-off-by: Rebecca Schultz Zavin <rschultz@google.com>
Git-commit: f5a3b2b65a6c0a620059144719792e55255b29d9
Git-repo: https://android.googlesource.com/kernel/common
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index 47cf5fe..2687dd1 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -102,7 +102,7 @@
 static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
-			     unsigned int order)
+			     unsigned int order, struct vm_struct *vm_struct)
 {
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
@@ -116,10 +116,13 @@
		   purpose is to keep the pages out of the cache */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
-			void *addr = vmap(&sub_page, 1, VM_MAP,
-					  pgprot_writecombine(PAGE_KERNEL));
-			memset(addr, 0, PAGE_SIZE);
-			vunmap(addr);
+			struct page **pages = &sub_page;
+			map_vm_area(vm_struct,
+				    pgprot_writecombine(PAGE_KERNEL),
+				    &pages);
+			memset(vm_struct->addr, 0, PAGE_SIZE);
+			unmap_kernel_range((unsigned long)vm_struct->addr,
+					   PAGE_SIZE);
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
@@ -175,6 +178,8 @@
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
+	struct vm_struct *vm_struct;
+	pte_t *ptes;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
@@ -223,10 +228,13 @@
err1:
kfree(table);
err:
+ vm_struct = get_vm_area(PAGE_SIZE, &ptes);
list_for_each_entry(info, &pages, list) {
- free_buffer_page(sys_heap, buffer, info->page, info->order);
+ free_buffer_page(sys_heap, buffer, info->page, info->order,
+ vm_struct);
kfree(info);
}
+ free_vm_area(vm_struct);
return -ENOMEM;
}
@@ -239,10 +247,16 @@
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	LIST_HEAD(pages);
+	struct vm_struct *vm_struct;
+	pte_t *ptes;
	int i;

+	vm_struct = get_vm_area(PAGE_SIZE, &ptes);
+
	for_each_sg(table->sgl, sg, table->nents, i)
-		free_buffer_page(sys_heap, buffer, sg_page(sg), get_order(sg_dma_len(sg)));
+		free_buffer_page(sys_heap, buffer, sg_page(sg),
+				 get_order(sg_dma_len(sg)), vm_struct);
+	free_vm_area(vm_struct);
	sg_free_table(table);
	kfree(table);
	atomic_sub(buffer->size, &system_heap_allocated);