gpu: ion: several bugfixes and enhancements to ION

1. Verify that the size of a memory allocation in ion_alloc() is at
least page-aligned. If it is not, round the size up to the next
PAGE_SIZE boundary (see the sketch after this list).
2. Unmap all kernel and DMA address-space mappings when destroying an
ion_buffer in ion_buffer_destroy(). This prevents leaks in those
virtual address spaces.
3. Make ion_alloc() return an explicit Linux error code when it fails
to allocate a buffer.
4. Simplify the ion_alloc() implementation by removing the 'goto'
statement and the related call to ion_buffer_put().
5. Check that the task is valid before calling put_task_struct() on the
failure path of ion_client_create().
6. Return an error when a buffer allocation requested by userspace
fails.
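
For reference, PAGE_ALIGN() rounds a length up to the next page
boundary. A minimal userspace sketch of that behavior (PAGE_SIZE and
PAGE_ALIGN are redefined locally and 4 KiB pages are assumed; this is
an illustration, not the kernel macro itself):

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
            /* rounds up: 1 -> 4096, 4096 -> 4096, 4097 -> 8192 */
            printf("%lu %lu %lu\n", PAGE_ALIGN(1UL),
                   PAGE_ALIGN(4096UL), PAGE_ALIGN(4097UL));
            return 0;
    }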
Change-Id: I4fa9859f4a0b665fcb44e5c0da43c569732e93ae
Signed-off-by: KyongHo Cho <pullip.cho@samsung.com>
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index afd6ffc..8bb1be0 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -166,6 +166,12 @@
struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
struct ion_device *dev = buffer->dev;
+ if (WARN_ON(buffer->kmap_cnt > 0))
+ buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+
+ if (WARN_ON(buffer->dmap_cnt > 0))
+ buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+
buffer->heap->ops->free(buffer);
mutex_lock(&dev->lock);
rb_erase(&buffer->node, &dev->buffers);
@@ -296,6 +302,11 @@
* request of the caller allocate from it. Repeat until allocate has
* succeeded or all heaps have been tried
*/
+ if (WARN_ON(!len))
+ return ERR_PTR(-EINVAL);
+
+ len = PAGE_ALIGN(len);
+
mutex_lock(&dev->lock);
for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
@@ -311,27 +322,26 @@
}
mutex_unlock(&dev->lock);
- if (IS_ERR_OR_NULL(buffer))
+ if (buffer == NULL)
+ return ERR_PTR(-ENODEV);
+
+ if (IS_ERR(buffer))
return ERR_PTR(PTR_ERR(buffer));
handle = ion_handle_create(client, buffer);
- if (IS_ERR_OR_NULL(handle))
- goto end;
-
/*
* ion_buffer_create will create a buffer with a ref_cnt of 1,
* and ion_handle_create will take a second reference, drop one here
*/
ion_buffer_put(buffer);
- mutex_lock(&client->lock);
- ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- return handle;
+ if (!IS_ERR(handle)) {
+ mutex_lock(&client->lock);
+ ion_handle_add(client, handle);
+ mutex_unlock(&client->lock);
+ }
-end:
- ion_buffer_put(buffer);
return handle;
}
@@ -680,7 +690,8 @@
client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
if (!client) {
- put_task_struct(current->group_leader);
+ if (task)
+ put_task_struct(current->group_leader);
return ERR_PTR(-ENOMEM);
}
@@ -798,6 +809,15 @@
vma->vm_private_data = NULL;
return;
}
+
+ if (!ion_handle_validate(client, handle)) {
+ ion_client_put(client);
+ vma->vm_private_data = NULL;
+ return;
+ }
+
+ ion_handle_get(handle);
+
pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
__func__, __LINE__,
atomic_read(&client->ref.refcount),
@@ -919,7 +939,7 @@
struct file *file;
if (fd < 0)
- return -ENFILE;
+ return fd;
file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
handle->buffer, O_RDWR);
@@ -948,8 +968,14 @@
return -EFAULT;
data.handle = ion_alloc(client, data.len, data.align,
data.flags);
- if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+
+ if (IS_ERR(data.handle))
+ return PTR_ERR(data.handle);
+
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ ion_free(client, data.handle);
return -EFAULT;
+ }
break;
}
case ION_IOC_FREE:
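
With the ION_IOC_ALLOC change above, userspace now receives the
allocation error directly from the ioctl return value instead of a
success return carrying an error-valued handle. A hedged usage sketch,
assuming the ion_allocation_data layout and ioctl numbers exported by
this tree's <linux/ion.h> and the /dev/ion misc device node:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ion.h>

    int main(void)
    {
            /* len == 0 is now rejected inside ion_alloc() with -EINVAL */
            struct ion_allocation_data data = {
                    .len = 0, .align = 0, .flags = 0,
            };
            int fd = open("/dev/ion", O_RDWR);

            if (fd < 0)
                    return 1;
            if (ioctl(fd, ION_IOC_ALLOC, &data) < 0)
                    fprintf(stderr, "ION_IOC_ALLOC: %s\n", strerror(errno));
            close(fd);
            return 0;
    }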