Merge "gpu: ion: Do fallback when allocating large sizes"
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index bc9bddd..b1c1c5d 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -42,6 +42,7 @@
*/
struct ion_iommu_priv_data {
struct page **pages;
+ unsigned int pages_uses_vmalloc;
int nrpages;
unsigned long size;
};
@@ -118,6 +119,7 @@
unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];
+ unsigned int page_tbl_size;

data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
@@ -139,8 +141,24 @@

data->size = PFN_ALIGN(size);
data->nrpages = data->size >> PAGE_SHIFT;
- data->pages = kzalloc(sizeof(struct page *)*data->nrpages,
- GFP_KERNEL);
+ data->pages_uses_vmalloc = 0;
+ page_tbl_size = sizeof(struct page *) * data->nrpages;
+
+ if (page_tbl_size > SZ_8K) {
+ /*
+ * Try a fast physically contiguous kmalloc() first, then fall back
+ * to vmalloc(), trading performance for availability when memory
+ * is fragmented.
+ */
+ data->pages = kmalloc(page_tbl_size,
+ __GFP_COMP | __GFP_NORETRY |
+ __GFP_NO_KSWAPD | __GFP_NOWARN);
+ if (!data->pages) {
+ data->pages = vmalloc(page_tbl_size);
+ data->pages_uses_vmalloc = 1;
+ }
+ } else {
+ data->pages = kmalloc(page_tbl_size, GFP_KERNEL);
+ }
if (!data->pages) {
ret = -ENOMEM;
goto err_free_data;
@@ -222,7 +240,10 @@
kfree(buffer->sg_table);
buffer->sg_table = 0;
err1:
- kfree(data->pages);
+ if (data->pages_uses_vmalloc)
+ vfree(data->pages);
+ else
+ kfree(data->pages);
err_free_data:
kfree(data);
@@ -253,7 +274,10 @@
sg_free_table(table);
kfree(table);
table = 0;
- kfree(data->pages);
+ if (data->pages_uses_vmalloc)
+ vfree(data->pages);
+ else
+ kfree(data->pages);
kfree(data);
}
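
For reference only, and not part of this change: kernels that provide the
generic helpers (v4.12 and later) can express the same fallback without the
pages_uses_vmalloc bookkeeping, because kvfree() detects from the address
which allocator was used:

	/* kvmalloc_array()/kvfree() are declared via <linux/mm.h>
	 * (or <linux/slab.h> on newer kernels). */
	data->pages = kvmalloc_array(data->nrpages, sizeof(struct page *),
				     GFP_KERNEL);
	if (!data->pages) {
		ret = -ENOMEM;
		goto err_free_data;
	}

	/* and on both free paths: */
	kvfree(data->pages);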