gpu: ion: Pull in patches for 3.4
Pull in Ion patches for the 3.4 upgrade. Includes the following
patches from Google:
commit 7191e9ba2508ca6f1eaab251cf3f0a2318eebe26
Author: Rebecca Schultz Zavin <rebecca@android.com>
Date: Mon Jan 30 14:18:08 2012 -0800
ion: Switch map/unmap dma api to sg_tables
Switch these APIs from scatterlists to sg_tables
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
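For context, this changes the heap map_dma op from returning a bare
scatterlist to returning an sg_table. A minimal, hypothetical sketch of
the new shape (struct ion_heap and struct ion_buffer come from ion's
private header; example_heap_map_dma is not a real in-tree function):

#include <linux/scatterlist.h>
#include "ion_priv.h"	/* struct ion_heap, struct ion_buffer */

/* before: the op returned struct scatterlist *
 * after:  it hands back a full sg_table, carrying nents and
 * per-entry lengths along with the pages */
static struct sg_table *example_heap_map_dma(struct ion_heap *heap,
					     struct ion_buffer *buffer)
{
	return buffer->sg_table;
}

The MSM iommu heap hunks at the bottom of this commit show the same
conversion applied to a real heap.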
commit 6f9e56945d4ee3a2621968caa72b135cb07e49c4
Author: Rebecca Schultz Zavin <rebecca@android.com>
Date: Tue Jan 31 09:40:30 2012 -0800
ion: Add reserve function to ion
Rather than requiring each platform to call memblock_remove or
memblock_reserve from the board file, add this to ion
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
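The idea, sketched under the assumption that struct ion_platform_data
keeps its nr/heaps fields; example_ion_reserve is a hypothetical
condensation of the real ion_reserve():

#include <linux/memblock.h>
#include <linux/ion.h>

void __init example_ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		/* carve each statically placed heap out of the page
		 * allocator once, instead of in every board file */
		memblock_reserve(data->heaps[i].base,
				 data->heaps[i].size);
	}
}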
commit 9ae7e01de1cf03c77054da44d135a7e85863fcb0
Author: KyongHo Cho <pullip.cho@samsung.com>
Date: Wed Sep 7 11:27:07 2011 +0900
gpu: ion: several bugfixes and enhancements of ION
1. Verify that the size of a memory allocation in ion_alloc() is at
least PAGE_SIZE-aligned; if it is not, round the size up to PAGE_SIZE
alignment (fixes 1 and 3 are sketched below).
2. Unmap all mappings to the kernel and DMA address spaces when
destroying an ion_buffer in ion_buffer_destroy(). This prevents leaks
in those virtual address spaces.
3. Make ion_alloc() return an explicit Linux error code when it fails
to allocate a buffer.
4. Simplify the ion_alloc() implementation: remove a 'goto' statement
and the related call to ion_buffer_put().
5. Check that the task is valid before calling put_task_struct() when
creating an ion client fails in ion_client_create().
6. Return an error when a buffer allocation requested by userspace
fails.
Signed-off-by: KyongHo Cho <pullip.cho@samsung.com>
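A condensed, hypothetical sketch of fixes 1 and 3 (the real ion_alloc()
also walks the client's heap list; ion_handle_create is ion's internal
helper):

#include <linux/err.h>
#include <linux/mm.h>

struct ion_handle *example_ion_alloc(struct ion_client *client,
				     size_t len, size_t align,
				     unsigned int flags)
{
	struct ion_buffer *buffer = NULL;

	len = PAGE_ALIGN(len);		/* fix 1: round up to a page */
	if (!len)
		return ERR_PTR(-EINVAL);

	/* ... try each heap in priority order, setting buffer ... */

	if (!buffer)
		return ERR_PTR(-ENODEV); /* fix 3: explicit errno, not NULL */

	return ion_handle_create(client, buffer);
}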
commit 043a61468f395dd6d4fc518299726955e9999c59
Author: Rebecca Schultz Zavin <rebecca@android.com>
Date: Wed Feb 1 11:09:46 2012 -0800
ion: Switch ion to use dma-buf
Ion now uses dma-buf file descriptors to share
buffers with userspace. Ion becomes a dma-buf
exporter and any driver that can import dma-bufs
can now import ion file descriptors.
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
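To make the exporter/importer relationship concrete, here is a standard
3.4-era dma-buf import sequence; example_import_ion_fd is hypothetical,
but the dma_buf_* calls are the real API, and the fd would come from
ion (e.g. via the ION_IOC_SHARE ioctl):

#include <linux/dma-buf.h>

static int example_import_ion_fd(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);	/* takes a ref on the ion buffer */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return PTR_ERR(attach);
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return PTR_ERR(sgt);
	}

	/* ... program the device with sgt; unmap/detach/put on teardown ... */
	return 0;
}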
commit 0d1259b5f84969bd00811ff9faa1c44cdb9fdbb5
Author: Rebecca Schultz Zavin <rebecca@android.com>
Date: Mon Apr 30 16:45:38 2012 -0700
gpu: ion: Use alloc_pages instead of vmalloc from the system heap
With this change the ion_system_heap will only use kernel address
space when the memory is actually mapped into the kernel (a rare case).
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
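A sketch of the new allocation strategy, using a hypothetical helper
name; the point is that alloc_page() consumes no kernel virtual address
space, and vmap() is deferred to the rare map_kernel path:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_alloc_then_map(struct page **pages, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!pages[i])
			return NULL;	/* real code frees the pages
					 * already allocated */
	}

	/* only paid when the kernel actually needs a mapping */
	return vmap(pages, npages, VM_MAP, PAGE_KERNEL);
}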
commit be4a1ee79a89da3ca705aecc2ac92cbeedd032bd
Author: Rebecca Schultz Zavin <rebecca@android.com>
Date: Thu Apr 26 20:44:10 2012 -0700
gpu: ion: support begin/end and kmap/kunmap dma_buf ops
These ops were added in the 3.4 kernel. This patch adds support
for them to ion. The previous ion_map/unmap_kernel API is retained
alongside the new one.
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
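The 3.4 dma_buf_ops CPU-access hooks have the shapes below; the bodies
are illustrative placeholders, not ion's exact implementation (struct
ion_buffer again comes from ion's private header):

#include <linux/dma-buf.h>
#include "ion_priv.h"	/* struct ion_buffer */

static int example_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
				    size_t len, enum dma_data_direction dir)
{
	/* ion maps the buffer into the kernel here, so kmap can hand
	 * out per-page addresses afterwards */
	return 0;
}

static void *example_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
{
	struct ion_buffer *buffer = dmabuf->priv;

	/* kernel address of page pgnum within the mapped buffer */
	return buffer->vaddr + pgnum * PAGE_SIZE;
}

static struct dma_buf_ops example_dma_buf_ops = {
	.begin_cpu_access = example_begin_cpu_access,
	.kmap = example_kmap,
	/* .map_dma_buf, .end_cpu_access, .kunmap, etc. omitted */
};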
commit 46d71337f9aa84694e4e6cca7f3beb6b033bbf76
Author: Rebecca Schultz Zavin <rebecca@android.com>
Date: Mon May 7 16:06:32 2012 -0700
gpu: ion: Allocate the sg_table at creation time rather than dynamically
Rather than calling map_dma on the allocations dynamically, this patch
switches to creating the sg_table at the time the buffer is created.
This is necessary because in future updates the sg_table will be used
for cache maintenance.
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
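A hypothetical condensation of the new flow at buffer-creation time
(ops->map_dma is the real heap op; example_buffer_init_sg is not):

#include <linux/err.h>
#include "ion_priv.h"	/* struct ion_heap, struct ion_buffer */

static int example_buffer_init_sg(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	struct sg_table *table;

	/* map once at creation and cache the result, instead of on
	 * every dma map request; later cache-maintenance code can
	 * then walk the same table */
	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table))
		return table ? PTR_ERR(table) : -EINVAL;

	buffer->sg_table = table;
	return 0;
}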
commit 903f6c716db3d4e26952aae9717f81dd5bc9e4ba
Author: Rebecca Schultz Zavin <rebecca@android.com>
Date: Wed May 23 12:55:55 2012 -0700
gpu: ion: Get an sg_table from an ion handle
This patch adds an interface to return an sg_table given a
valid ion handle.
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
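A usage sketch of the new interface; ion_sg_table() is the call this
patch adds, while example_use_sg_table and the pr_debug body are
illustrative:

#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_use_sg_table(struct ion_client *client,
				struct ion_handle *handle)
{
	struct sg_table *table = ion_sg_table(client, handle);
	struct scatterlist *sg;
	int i;

	if (IS_ERR_OR_NULL(table))
		return table ? PTR_ERR(table) : -EINVAL;

	/* e.g. hand pages to an IOMMU or do cache maintenance */
	for_each_sg(table->sgl, sg, table->nents, i)
		pr_debug("page %p len %u\n", sg_page(sg), sg->length);

	return 0;
}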
The commit also includes fixups needed for MSM-specific code.
Change-Id: Idbcfa9d6af0febf06f56daaa6beaa59cc08e4351
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 9ea6f2b..d0f101c 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -35,7 +35,6 @@
struct page **pages;
int nrpages;
unsigned long size;
- struct scatterlist *iommu_sglist;
};
static int ion_iommu_heap_allocate(struct ion_heap *heap,
@@ -47,6 +46,10 @@
struct ion_iommu_priv_data *data = NULL;
if (msm_use_iommu()) {
+ struct scatterlist *sg;
+ struct sg_table *table;
+ unsigned int i;
+
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -59,25 +62,26 @@
ret = -ENOMEM;
goto err1;
}
- data->iommu_sglist = vmalloc(sizeof(*data->iommu_sglist) *
- data->nrpages);
- if (!data->iommu_sglist) {
+
+ table = buffer->sg_table =
+ kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+
+ if (!table) {
ret = -ENOMEM;
goto err1;
}
+ ret = sg_alloc_table(table, data->nrpages, GFP_KERNEL);
+ if (ret)
+ goto err2;
- sg_init_table(data->iommu_sglist, data->nrpages);
-
- for (i = 0; i < data->nrpages; i++) {
+ for_each_sg(table->sgl, sg, table->nents, i) {
data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!data->pages[i])
- goto err2;
+ goto err3;
- sg_set_page(&data->iommu_sglist[i], data->pages[i],
- PAGE_SIZE, 0);
+ sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
}
-
buffer->priv_virt = data;
return 0;
@@ -86,9 +90,11 @@
}
+err3:
+ sg_free_table(buffer->sg_table);
err2:
- vfree(data->iommu_sglist);
- data->iommu_sglist = NULL;
+ kfree(buffer->sg_table);
+ buffer->sg_table = 0;
for (i = 0; i < data->nrpages; i++) {
if (data->pages[i])
@@ -111,16 +117,12 @@
for (i = 0; i < data->nrpages; i++)
__free_page(data->pages[i]);
- vfree(data->iommu_sglist);
- data->iommu_sglist = NULL;
-
kfree(data->pages);
kfree(data);
}
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer,
- unsigned long flags)
+ struct ion_buffer *buffer)
{
struct ion_iommu_priv_data *data = buffer->priv_virt;
pgprot_t page_prot = PAGE_KERNEL;
@@ -128,7 +130,7 @@
if (!data)
return NULL;
- if (!ION_IS_CACHED(flags))
+ if (!ION_IS_CACHED(buffer->flags))
page_prot = pgprot_noncached(page_prot);
buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
@@ -147,7 +149,7 @@
}
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma, unsigned long flags)
+ struct vm_area_struct *vma)
{
struct ion_iommu_priv_data *data = buffer->priv_virt;
int i;
@@ -155,7 +157,7 @@
if (!data)
return -EINVAL;
- if (!ION_IS_CACHED(flags))
+ if (!ION_IS_CACHED(buffer->flags))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
curr_addr = vma->vm_start;
@@ -183,7 +185,6 @@
struct iommu_domain *domain;
int ret = 0;
unsigned long extra;
- struct ion_iommu_priv_data *buffer_data = buffer->priv_virt;
int prot = IOMMU_WRITE | IOMMU_READ;
prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
@@ -207,7 +208,8 @@
}
ret = iommu_map_range(domain, data->iova_addr,
- buffer_data->iommu_sglist, buffer->size, prot);
+ buffer->sg_table->sgl,
+ buffer->size, prot);
if (ret) {
pr_err("%s: could not map %lx in domain %p\n",
__func__, data->iova_addr, domain);
@@ -299,16 +301,19 @@
return 0;
}
-static struct scatterlist *ion_iommu_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
- struct ion_iommu_priv_data *data = buffer->priv_virt;
- return data->iommu_sglist;
+ return buffer->sg_table;
}
static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
+ if (buffer->sg_table)
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
+ buffer->sg_table = 0;
}
static struct ion_heap_ops iommu_heap_ops = {