gpu: ion: Move iommu mapping and unmapping out of Ion
Ion is not the right place for iommu mappings to be managed.
Pull the mapping and unmapping code out of the Ion core and into
msm-specific files.
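
The ion_map_iommu()/ion_unmap_iommu() entry points keep their existing
signatures; only the implementation moves, now built on per-handle
metadata in drivers/gpu/ion/msm/ion_iommu_map.c. A minimal caller
sketch, with the domain/partition numbers and error handling purely
illustrative:

	unsigned long iova, buffer_size;
	int ret;

	ret = ion_map_iommu(client, handle, domain_num, partition_num,
			    SZ_4K, 0, &iova, &buffer_size, 0, 0);
	if (ret)
		return ret;

	/* ... program the hardware with iova ... */

	ion_unmap_iommu(client, handle, domain_num, partition_num);
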
Change-Id: If9307d552c47f19e2c14036926f25587d0f899c6
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index d3434d8..829319b 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -112,8 +112,6 @@
!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}
-static void ion_iommu_release(struct kref *kref);
-
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
struct ion_buffer *buffer)
@@ -140,61 +138,6 @@
rb_insert_color(&buffer->node, &dev->buffers);
}
-static void ion_iommu_add(struct ion_buffer *buffer,
- struct ion_iommu_map *iommu)
-{
- struct rb_node **p = &buffer->iommu_maps.rb_node;
- struct rb_node *parent = NULL;
- struct ion_iommu_map *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_iommu_map, node);
-
- if (iommu->key < entry->key) {
- p = &(*p)->rb_left;
- } else if (iommu->key > entry->key) {
- p = &(*p)->rb_right;
- } else {
- pr_err("%s: buffer %p already has mapping for domain %d"
- " and partition %d\n", __func__,
- buffer,
- iommu_map_domain(iommu),
- iommu_map_partition(iommu));
- BUG();
- }
- }
-
- rb_link_node(&iommu->node, parent, p);
- rb_insert_color(&iommu->node, &buffer->iommu_maps);
-
-}
-
-static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
- unsigned int domain_no,
- unsigned int partition_no)
-{
- struct rb_node **p = &buffer->iommu_maps.rb_node;
- struct rb_node *parent = NULL;
- struct ion_iommu_map *entry;
- uint64_t key = domain_no;
- key = key << 32 | partition_no;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_iommu_map, node);
-
- if (key < entry->key)
- p = &(*p)->rb_left;
- else if (key > entry->key)
- p = &(*p)->rb_right;
- else
- return entry;
- }
-
- return NULL;
-}
-
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
/* this function should only be called while dev->lock is held */
@@ -275,38 +218,6 @@
return ERR_PTR(ret);
}
-/**
- * Check for delayed IOMMU unmapping. Also unmap any outstanding
- * mappings which would otherwise have been leaked.
- */
-static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
-{
- struct ion_iommu_map *iommu_map;
- struct rb_node *node;
- const struct rb_root *rb = &(buffer->iommu_maps);
- unsigned long ref_count;
- unsigned int delayed_unmap;
-
- mutex_lock(&buffer->lock);
-
- while ((node = rb_first(rb)) != 0) {
- iommu_map = rb_entry(node, struct ion_iommu_map, node);
- ref_count = atomic_read(&iommu_map->ref.refcount);
- delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;
-
- if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
- pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
- __func__, iommu_map->domain_info[DI_DOMAIN_NUM],
- iommu_map->domain_info[DI_PARTITION_NUM]);
- }
- /* set ref count to 1 to force release */
- kref_init(&iommu_map->ref);
- kref_put(&iommu_map->ref, ion_iommu_release);
- }
-
- mutex_unlock(&buffer->lock);
-}
-
static void ion_delayed_unsecure(struct ion_buffer *buffer)
{
if (buffer->heap->ops->unsecure_buffer)
@@ -323,7 +234,6 @@
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
ion_delayed_unsecure(buffer);
- ion_iommu_delayed_unmap(buffer);
buffer->heap->ops->free(buffer);
mutex_lock(&dev->lock);
rb_erase(&buffer->node, &dev->buffers);
@@ -654,212 +564,6 @@
ion_buffer_kmap_put(buffer);
}
-static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
- int domain_num, int partition_num, unsigned long align,
- unsigned long iova_length, unsigned long flags,
- unsigned long *iova)
-{
- struct ion_iommu_map *data;
- int ret;
-
- data = kmalloc(sizeof(*data), GFP_ATOMIC);
-
- if (!data)
- return ERR_PTR(-ENOMEM);
-
- data->buffer = buffer;
- iommu_map_domain(data) = domain_num;
- iommu_map_partition(data) = partition_num;
-
- ret = buffer->heap->ops->map_iommu(buffer, data,
- domain_num,
- partition_num,
- align,
- iova_length,
- flags);
-
- if (ret)
- goto out;
-
- kref_init(&data->ref);
- *iova = data->iova_addr;
-
- ion_iommu_add(buffer, data);
-
- return data;
-
-out:
- kfree(data);
- return ERR_PTR(ret);
-}
-
-int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
- int domain_num, int partition_num, unsigned long align,
- unsigned long iova_length, unsigned long *iova,
- unsigned long *buffer_size,
- unsigned long flags, unsigned long iommu_flags)
-{
- struct ion_buffer *buffer;
- struct ion_iommu_map *iommu_map;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(client)) {
- pr_err("%s: client pointer is invalid\n", __func__);
- return -EINVAL;
- }
- if (IS_ERR_OR_NULL(handle)) {
- pr_err("%s: handle pointer is invalid\n", __func__);
- return -EINVAL;
- }
- if (IS_ERR_OR_NULL(handle->buffer)) {
- pr_err("%s: buffer pointer is invalid\n", __func__);
- return -EINVAL;
- }
-
- if (ION_IS_CACHED(flags)) {
- pr_err("%s: Cannot map iommu as cached.\n", __func__);
- return -EINVAL;
- }
-
- mutex_lock(&client->lock);
- if (!ion_handle_validate(client, handle)) {
- pr_err("%s: invalid handle passed to map_kernel.\n",
- __func__);
- mutex_unlock(&client->lock);
- return -EINVAL;
- }
-
- buffer = handle->buffer;
- mutex_lock(&buffer->lock);
-
- if (!handle->buffer->heap->ops->map_iommu) {
- pr_err("%s: map_iommu is not implemented by this heap.\n",
- __func__);
- ret = -ENODEV;
- goto out;
- }
-
- /*
- * If clients don't want a custom iova length, just use whatever
- * the buffer size is
- */
- if (!iova_length)
- iova_length = buffer->size;
-
- if (buffer->size > iova_length) {
- pr_debug("%s: iova length %lx is not at least buffer size"
- " %x\n", __func__, iova_length, buffer->size);
- ret = -EINVAL;
- goto out;
- }
-
- if (buffer->size & ~PAGE_MASK) {
- pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
- buffer->size, PAGE_SIZE);
- ret = -EINVAL;
- goto out;
- }
-
- if (iova_length & ~PAGE_MASK) {
- pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
- iova_length, PAGE_SIZE);
- ret = -EINVAL;
- goto out;
- }
-
- iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
- if (!iommu_map) {
- iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
- align, iova_length, flags, iova);
- if (!IS_ERR_OR_NULL(iommu_map)) {
- iommu_map->flags = iommu_flags;
-
- if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
- kref_get(&iommu_map->ref);
- } else {
- ret = PTR_ERR(iommu_map);
- }
- } else {
- if (iommu_map->flags != iommu_flags) {
- pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
- __func__, handle,
- iommu_map->flags, iommu_flags);
- ret = -EINVAL;
- } else if (iommu_map->mapped_size != iova_length) {
- pr_err("%s: handle %p is already mapped with length"
- " %x, trying to map with length %lx\n",
- __func__, handle, iommu_map->mapped_size,
- iova_length);
- ret = -EINVAL;
- } else {
- kref_get(&iommu_map->ref);
- *iova = iommu_map->iova_addr;
- }
- }
- if (!ret)
- buffer->iommu_map_cnt++;
- *buffer_size = buffer->size;
-out:
- mutex_unlock(&buffer->lock);
- mutex_unlock(&client->lock);
- return ret;
-}
-EXPORT_SYMBOL(ion_map_iommu);
-
-static void ion_iommu_release(struct kref *kref)
-{
- struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
- ref);
- struct ion_buffer *buffer = map->buffer;
-
- rb_erase(&map->node, &buffer->iommu_maps);
- buffer->heap->ops->unmap_iommu(map);
- kfree(map);
-}
-
-void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
- int domain_num, int partition_num)
-{
- struct ion_iommu_map *iommu_map;
- struct ion_buffer *buffer;
-
- if (IS_ERR_OR_NULL(client)) {
- pr_err("%s: client pointer is invalid\n", __func__);
- return;
- }
- if (IS_ERR_OR_NULL(handle)) {
- pr_err("%s: handle pointer is invalid\n", __func__);
- return;
- }
- if (IS_ERR_OR_NULL(handle->buffer)) {
- pr_err("%s: buffer pointer is invalid\n", __func__);
- return;
- }
-
- mutex_lock(&client->lock);
- buffer = handle->buffer;
-
- mutex_lock(&buffer->lock);
-
- iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
-
- if (!iommu_map) {
- WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
- domain_num, partition_num, buffer);
- goto out;
- }
-
- kref_put(&iommu_map->ref, ion_iommu_release);
-
- buffer->iommu_map_cnt--;
-out:
- mutex_unlock(&buffer->lock);
-
- mutex_unlock(&client->lock);
-
-}
-EXPORT_SYMBOL(ion_unmap_iommu);
-
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
struct ion_buffer *buffer;
@@ -948,7 +652,6 @@
{
struct ion_client *client = s->private;
struct rb_node *n;
- struct rb_node *n2;
seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
"heap_name", "size_in_bytes", "handle refcount",
@@ -973,15 +676,6 @@
else
seq_printf(s, " : %12s", "N/A");
- for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
- n2 = rb_next(n2)) {
- struct ion_iommu_map *imap =
- rb_entry(n2, struct ion_iommu_map, node);
- seq_printf(s, " : [%d,%d] - %8lx",
- imap->domain_info[DI_DOMAIN_NUM],
- imap->domain_info[DI_PARTITION_NUM],
- imap->iova_addr);
- }
seq_printf(s, "\n");
}
mutex_unlock(&client->lock);
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index aeffb52..e0b15e7 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -24,11 +24,9 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/iommu.h>
#include <linux/seq_file.h>
#include "ion_priv.h"
-#include <mach/iommu_domains.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>
@@ -303,110 +301,6 @@
return 0;
}
-int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- struct iommu_domain *domain;
- int ret = 0;
- unsigned long extra;
- struct scatterlist *sglist = 0;
- int prot = IOMMU_WRITE | IOMMU_READ;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = buffer->priv_phys;
- return 0;
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- sglist = vmalloc(sizeof(*sglist));
- if (!sglist)
- goto out1;
-
- sg_init_table(sglist, 1);
- sglist->length = buffer->size;
- sglist->offset = 0;
- sglist->dma_address = buffer->priv_phys;
-
- ret = iommu_map_range(domain, data->iova_addr, sglist,
- buffer->size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- unsigned long phys_addr = sg_phys(sglist);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- vfree(sglist);
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- vfree(sglist);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
-out:
-
- return ret;
-}
-
-void ion_carveout_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
static struct ion_heap_ops carveout_heap_ops = {
.allocate = ion_carveout_heap_allocate,
.free = ion_carveout_heap_free,
@@ -418,8 +312,6 @@
.unmap_dma = ion_carveout_heap_unmap_dma,
.cache_op = ion_carveout_cache_ops,
.print_debug = ion_carveout_print_debug,
- .map_iommu = ion_carveout_heap_map_iommu,
- .unmap_iommu = ion_carveout_heap_unmap_iommu,
};
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
index 4f12e38..64de755 100644
--- a/drivers/gpu/ion/ion_cma_heap.c
+++ b/drivers/gpu/ion/ion_cma_heap.c
@@ -178,100 +178,6 @@
return;
}
-int ion_cma_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- unsigned long extra_iova_addr;
- struct ion_cma_buffer_info *info = buffer->priv_virt;
- struct sg_table *table = info->table;
- int prot = IOMMU_WRITE | IOMMU_READ;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = info->handle;
- return 0;
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -EINVAL;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, table->sgl,
- buffer->size, prot);
-
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- extra_iova_addr = data->iova_addr + buffer->size;
- if (extra) {
- unsigned long phys_addr = sg_phys(table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
-
-void ion_cma_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
int ion_cma_cache_ops(struct ion_heap *heap,
struct ion_buffer *buffer, void *vaddr,
unsigned int offset, unsigned int length,
@@ -358,8 +264,6 @@
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
- .map_iommu = ion_cma_map_iommu,
- .unmap_iommu = ion_cma_unmap_iommu,
.cache_op = ion_cma_cache_ops,
.print_debug = ion_cma_print_debug,
};
diff --git a/drivers/gpu/ion/ion_cma_secure_heap.c b/drivers/gpu/ion/ion_cma_secure_heap.c
index 0fbcfbf..633da03 100644
--- a/drivers/gpu/ion/ion_cma_secure_heap.c
+++ b/drivers/gpu/ion/ion_cma_secure_heap.c
@@ -212,100 +212,6 @@
return;
}
-int ion_secure_cma_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- unsigned long extra_iova_addr;
- struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
- struct sg_table *table = info->table;
- int prot = IOMMU_WRITE | IOMMU_READ;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = info->handle;
- return 0;
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -EINVAL;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, table->sgl,
- buffer->size, prot);
-
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- extra_iova_addr = data->iova_addr + buffer->size;
- if (extra) {
- unsigned long phys_addr = sg_phys(table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
-
-void ion_secure_cma_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
int ion_secure_cma_cache_ops(struct ion_heap *heap,
struct ion_buffer *buffer, void *vaddr,
unsigned int offset, unsigned int length,
@@ -354,8 +260,6 @@
.map_user = ion_secure_cma_mmap,
.map_kernel = ion_secure_cma_map_kernel,
.unmap_kernel = ion_secure_cma_unmap_kernel,
- .map_iommu = ion_secure_cma_map_iommu,
- .unmap_iommu = ion_secure_cma_unmap_iommu,
.cache_op = ion_secure_cma_cache_ops,
.print_debug = ion_secure_cma_print_debug,
.secure_buffer = ion_cp_secure_buffer,
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 88addab..42164bc 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -67,10 +67,6 @@
* kernel space (un-cached).
* @umap_count: the total number of times this heap has been mapped in
* user space.
- * @iommu_iova: saved iova when mapping full heap at once.
- * @iommu_partition: partition used to map full heap.
- * @iommu_map_all: Indicates whether we should map whole heap into IOMMU.
- * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
* @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
*/
struct ion_cp_heap {
@@ -90,11 +86,6 @@
unsigned long kmap_cached_count;
unsigned long kmap_uncached_count;
unsigned long umap_count;
- unsigned long iommu_iova[MAX_DOMAINS];
- unsigned long iommu_partition[MAX_DOMAINS];
- void *reserved_vrange;
- int iommu_map_all;
- int iommu_2x_map_domain;
unsigned int has_outer_cache;
atomic_t protect_cnt;
void *cpu_addr;
@@ -361,29 +352,6 @@
return offset;
}
-static void iommu_unmap_all(unsigned long domain_num,
- struct ion_cp_heap *cp_heap)
-{
- unsigned long left_to_unmap = cp_heap->total_size;
- unsigned long page_size = SZ_64K;
-
- struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
- if (domain) {
- unsigned long temp_iova = cp_heap->iommu_iova[domain_num];
-
- while (left_to_unmap) {
- iommu_unmap(domain, temp_iova, page_size);
- temp_iova += page_size;
- left_to_unmap -= page_size;
- }
- if (domain_num == cp_heap->iommu_2x_map_domain)
- msm_iommu_unmap_extra(domain, temp_iova,
- cp_heap->total_size, SZ_64K);
- } else {
- pr_err("Unable to get IOMMU domain %lu\n", domain_num);
- }
-}
-
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size)
{
@@ -401,25 +369,6 @@
cp_heap->heap_protected == HEAP_NOT_PROTECTED)
ion_on_last_free(heap);
- /* Unmap everything if we previously mapped the whole heap at once. */
- if (!cp_heap->allocated_bytes) {
- unsigned int i;
- for (i = 0; i < MAX_DOMAINS; ++i) {
- if (cp_heap->iommu_iova[i]) {
- unsigned long vaddr_len = cp_heap->total_size;
-
- if (i == cp_heap->iommu_2x_map_domain)
- vaddr_len <<= 1;
- iommu_unmap_all(i, cp_heap);
-
- msm_free_iova_address(cp_heap->iommu_iova[i], i,
- cp_heap->iommu_partition[i],
- vaddr_len);
- }
- cp_heap->iommu_iova[i] = 0;
- cp_heap->iommu_partition[i] = 0;
- }
- }
mutex_unlock(&cp_heap->lock);
}
@@ -859,205 +808,6 @@
return ret_value;
}
-static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
- int partition, unsigned long prot)
-{
- unsigned long left_to_map = cp_heap->total_size;
- unsigned long page_size = SZ_64K;
- int ret_value = 0;
- unsigned long virt_addr_len = cp_heap->total_size;
- struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
-
- /* If we are mapping into the video domain we need to map twice the
- * size of the heap to account for prefetch issue in video core.
- */
- if (domain_num == cp_heap->iommu_2x_map_domain)
- virt_addr_len <<= 1;
-
- if (cp_heap->total_size & (SZ_64K-1)) {
- pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
- ret_value = -EINVAL;
- }
- if (cp_heap->base & (SZ_64K-1)) {
- pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
- ret_value = -EINVAL;
- }
- if (!ret_value && domain) {
- unsigned long temp_phys = cp_heap->base;
- unsigned long temp_iova;
-
- ret_value = msm_allocate_iova_address(domain_num, partition,
- virt_addr_len, SZ_64K,
- &temp_iova);
-
- if (ret_value) {
- pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
- __func__, domain_num, partition);
- goto out;
- }
- cp_heap->iommu_iova[domain_num] = temp_iova;
-
- while (left_to_map) {
- int ret = iommu_map(domain, temp_iova, temp_phys,
- page_size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p, error: %d\n",
- __func__, temp_iova, domain, ret);
- ret_value = -EAGAIN;
- goto free_iova;
- }
- temp_iova += page_size;
- temp_phys += page_size;
- left_to_map -= page_size;
- }
- if (domain_num == cp_heap->iommu_2x_map_domain)
- ret_value = msm_iommu_map_extra(domain, temp_iova,
- cp_heap->base,
- cp_heap->total_size,
- SZ_64K, prot);
- if (ret_value)
- goto free_iova;
- } else {
- pr_err("Unable to get IOMMU domain %lu\n", domain_num);
- ret_value = -ENOMEM;
- }
- goto out;
-
-free_iova:
- msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
- partition, virt_addr_len);
-out:
- return ret_value;
-}
-
-static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- struct iommu_domain *domain;
- int ret = 0;
- unsigned long extra;
- struct ion_cp_heap *cp_heap =
- container_of(buffer->heap, struct ion_cp_heap, heap);
- int prot = IOMMU_WRITE | IOMMU_READ;
- struct ion_cp_buffer *buf = buffer->priv_virt;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- data->mapped_size = iova_length;
-
- if (!msm_use_iommu()) {
- data->iova_addr = buf->buffer;
- return 0;
- }
-
- if (cp_heap->iommu_iova[domain_num]) {
- /* Already mapped. */
- unsigned long offset = buf->buffer - cp_heap->base;
- data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
- return 0;
- } else if (cp_heap->iommu_map_all) {
- ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
- if (!ret) {
- unsigned long offset =
- buf->buffer - cp_heap->base;
- data->iova_addr =
- cp_heap->iommu_iova[domain_num] + offset;
- cp_heap->iommu_partition[domain_num] = partition_num;
- /*
- clear delayed map flag so that we don't interfere
- with this feature (we are already delaying).
- */
- data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
- return 0;
- } else {
- cp_heap->iommu_iova[domain_num] = 0;
- cp_heap->iommu_partition[domain_num] = 0;
- return ret;
- }
- }
-
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl,
- buffer->size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
-static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
- struct ion_cp_heap *cp_heap =
- container_of(data->buffer->heap, struct ion_cp_heap, heap);
-
- if (!msm_use_iommu())
- return;
-
-
- domain_num = iommu_map_domain(data);
-
- /* If we are mapping everything we'll wait to unmap until everything
- is freed. */
- if (cp_heap->iommu_iova[domain_num])
- return;
-
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
static struct ion_heap_ops cp_heap_ops = {
.allocate = ion_cp_heap_allocate,
.free = ion_cp_heap_free,
@@ -1072,8 +822,6 @@
.print_debug = ion_cp_print_debug,
.secure_heap = ion_cp_secure_heap,
.unsecure_heap = ion_cp_unsecure_heap,
- .map_iommu = ion_cp_heap_map_iommu,
- .unmap_iommu = ion_cp_heap_unmap_iommu,
.secure_buffer = ion_cp_secure_buffer,
.unsecure_buffer = ion_cp_unsecure_buffer,
};
@@ -1120,10 +868,6 @@
if (extra_data->release_region)
cp_heap->heap_release_region =
extra_data->release_region;
- cp_heap->iommu_map_all =
- extra_data->iommu_map_all;
- cp_heap->iommu_2x_map_domain =
- extra_data->iommu_2x_map_domain;
cp_heap->cma = extra_data->is_cma;
cp_heap->allow_non_secure_allocation =
extra_data->allow_nonsecure_alloc;
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 512ebf3..1b4ad33 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -315,102 +315,6 @@
return 0;
}
-int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- struct iommu_domain *domain;
- int ret = 0;
- unsigned long extra;
- int prot = IOMMU_WRITE | IOMMU_READ;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- BUG_ON(!msm_use_iommu());
-
- data->mapped_size = iova_length;
- extra = iova_length - buffer->size;
-
- /* Use the biggest alignment to allow bigger IOMMU mappings.
- * Use the first entry since the first entry will always be the
- * biggest entry. To take advantage of bigger mapping sizes both the
- * VA and PA addresses have to be aligned to the biggest size.
- */
- if (buffer->sg_table->sgl->length > align)
- align = buffer->sg_table->sgl->length;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr,
- buffer->sg_table->sgl,
- buffer->size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- buffer->size);
-
-out:
-
- return ret;
-}
-
-void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- BUG_ON(!msm_use_iommu());
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
void *vaddr, unsigned int offset, unsigned int length,
unsigned int cmd)
@@ -483,8 +387,6 @@
.map_user = ion_iommu_heap_map_user,
.map_kernel = ion_iommu_heap_map_kernel,
.unmap_kernel = ion_iommu_heap_unmap_kernel,
- .map_iommu = ion_iommu_heap_map_iommu,
- .unmap_iommu = ion_iommu_heap_unmap_iommu,
.cache_op = ion_iommu_cache_ops,
.map_dma = ion_iommu_heap_map_dma,
.unmap_dma = ion_iommu_heap_unmap_dma,
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 8d45f9d..109e3ff 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -65,8 +65,6 @@
struct sg_table *sg_table;
unsigned long *dirty;
struct list_head vmas;
- unsigned int iommu_map_cnt;
- struct rb_root iommu_maps;
int marked;
};
@@ -101,14 +99,6 @@
int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
void *vaddr, unsigned int offset,
unsigned int length, unsigned int cmd);
- int (*map_iommu)(struct ion_buffer *buffer,
- struct ion_iommu_map *map_data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags);
- void (*unmap_iommu)(struct ion_iommu_map *data);
int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map);
int (*secure_heap)(struct ion_heap *heap, int version, void *data);
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index ceb30a4..ee24736 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -24,9 +24,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/iommu.h>
#include <linux/seq_file.h>
-#include <mach/iommu_domains.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
@@ -210,32 +208,6 @@
vunmap(buffer->vaddr);
}
-void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
-{
- unsigned int domain_num;
- unsigned int partition_num;
- struct iommu_domain *domain;
-
- if (!msm_use_iommu())
- return;
-
- domain_num = iommu_map_domain(data);
- partition_num = iommu_map_partition(data);
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
- return;
- }
-
- iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-
- return;
-}
-
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
@@ -331,81 +303,6 @@
return 0;
}
-int ion_system_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- unsigned long extra_iova_addr;
- struct sg_table *table = buffer->priv_virt;
- int prot = IOMMU_WRITE | IOMMU_READ;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- if (!ION_IS_CACHED(flags))
- return -EINVAL;
-
- if (!msm_use_iommu())
- return -EINVAL;
-
- data->mapped_size = iova_length;
- extra = iova_length - buffer->size;
-
- /* Use the biggest alignment to allow bigger IOMMU mappings.
- * Use the first entry since the first entry will always be the
- * biggest entry. To take advantage of bigger mapping sizes both the
- * VA and PA addresses have to be aligned to the biggest size.
- */
- if (table->sgl->length > align)
- align = table->sgl->length;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
-
- ret = iommu_map_range(domain, data->iova_addr, table->sgl,
- buffer->size, prot);
-
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- extra_iova_addr = data->iova_addr + buffer->size;
- if (extra) {
- unsigned long phys_addr = sg_phys(table->sgl);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- return ret;
-
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-out1:
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
static struct ion_heap_ops vmalloc_ops = {
.allocate = ion_system_heap_allocate,
.free = ion_system_heap_free,
@@ -416,8 +313,6 @@
.map_user = ion_system_heap_map_user,
.cache_op = ion_system_heap_cache_ops,
.print_debug = ion_system_print_debug,
- .map_iommu = ion_system_heap_map_iommu,
- .unmap_iommu = ion_system_heap_unmap_iommu,
};
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
@@ -558,84 +453,6 @@
return 0;
}
-int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
- struct ion_iommu_map *data,
- unsigned int domain_num,
- unsigned int partition_num,
- unsigned long align,
- unsigned long iova_length,
- unsigned long flags)
-{
- int ret = 0;
- struct iommu_domain *domain;
- unsigned long extra;
- struct scatterlist *sglist = 0;
- struct page *page = 0;
- int prot = IOMMU_WRITE | IOMMU_READ;
- prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
-
- if (!ION_IS_CACHED(flags))
- return -EINVAL;
-
- if (!msm_use_iommu()) {
- data->iova_addr = virt_to_phys(buffer->vaddr);
- return 0;
- }
-
- data->mapped_size = iova_length;
- extra = iova_length - buffer->size;
-
- ret = msm_allocate_iova_address(domain_num, partition_num,
- data->mapped_size, align,
- &data->iova_addr);
-
- if (ret)
- goto out;
-
- domain = msm_get_iommu_domain(domain_num);
-
- if (!domain) {
- ret = -ENOMEM;
- goto out1;
- }
- page = virt_to_page(buffer->vaddr);
-
- sglist = vmalloc(sizeof(*sglist));
- if (!sglist)
- goto out1;
-
- sg_init_table(sglist, 1);
- sg_set_page(sglist, page, buffer->size, 0);
-
- ret = iommu_map_range(domain, data->iova_addr, sglist,
- buffer->size, prot);
- if (ret) {
- pr_err("%s: could not map %lx in domain %p\n",
- __func__, data->iova_addr, domain);
- goto out1;
- }
-
- if (extra) {
- unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- unsigned long phys_addr = sg_phys(sglist);
- ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
- extra, SZ_4K, prot);
- if (ret)
- goto out2;
- }
- vfree(sglist);
- return ret;
-out2:
- iommu_unmap_range(domain, data->iova_addr, buffer->size);
-
-out1:
- vfree(sglist);
- msm_free_iova_address(data->iova_addr, domain_num, partition_num,
- data->mapped_size);
-out:
- return ret;
-}
-
void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
@@ -659,8 +476,6 @@
.map_user = ion_system_contig_heap_map_user,
.cache_op = ion_system_contig_heap_cache_ops,
.print_debug = ion_system_contig_print_debug,
- .map_iommu = ion_system_contig_heap_map_iommu,
- .unmap_iommu = ion_system_heap_unmap_iommu,
};
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
diff --git a/drivers/gpu/ion/msm/Makefile b/drivers/gpu/ion/msm/Makefile
index 1893405..becdb02 100644
--- a/drivers/gpu/ion/msm/Makefile
+++ b/drivers/gpu/ion/msm/Makefile
@@ -1 +1 @@
-obj-y += msm_ion.o ion_cp_common.o
+obj-y += msm_ion.o ion_cp_common.o ion_iommu_map.o
diff --git a/drivers/gpu/ion/msm/ion_iommu_map.c b/drivers/gpu/ion/msm/ion_iommu_map.c
new file mode 100644
index 0000000..ae4ae37
--- /dev/null
+++ b/drivers/gpu/ion/msm/ion_iommu_map.c
@@ -0,0 +1,538 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/iommu.h>
+#include <linux/ion.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include <mach/iommu_domains.h>
+
+enum {
+ DI_PARTITION_NUM = 0,
+ DI_DOMAIN_NUM = 1,
+ DI_MAX,
+};
+
+#define iommu_map_domain(__m) ((__m)->domain_info[1])
+#define iommu_map_partition(__m) ((__m)->domain_info[0])
+
+/**
+ * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
+ * @iova_addr - iommu virtual address
+ * @node - rb node to exist in the buffer's tree of iommu mappings
+ * @domain_info - contains the partition number and domain number
+ * domain_info[1] = domain number
+ * domain_info[0] = partition number
+ * @ref - for reference counting this mapping
+ * @mapped_size - size of the iova space mapped
+ * (may not be the same as the buffer size)
+ * @flags - iommu domain/partition specific flags.
+ *
+ * Represents a mapping of one ion buffer to a particular iommu domain
+ * and address range. There may exist other mappings of this buffer in
+ * different domains or address ranges. All mappings will have the same
+ * cacheability and security.
+ */
+struct ion_iommu_map {
+ unsigned long iova_addr;
+ struct rb_node node;
+ union {
+ int domain_info[DI_MAX];
+ uint64_t key;
+ };
+ struct ion_iommu_meta *meta;
+ struct kref ref;
+ int mapped_size;
+ unsigned long flags;
+};
+
+
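+/**
+ * struct ion_iommu_meta - per-handle bookkeeping for iommu mappings
+ * @node - entry in the global iommu_root tree, keyed by ion handle
+ * @handle - the ion handle these mappings belong to
+ * @iommu_maps - tree of ion_iommu_map entries, one per domain/partition
+ * @ref - taken for each successful ion_map_iommu() call and dropped in
+ *        ion_unmap_iommu()
+ * @table - sg_table of the underlying buffer, from ion_sg_table()
+ * @size - total buffer size computed from the sg_table
+ * @lock - serializes access to @iommu_maps
+ */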
+struct ion_iommu_meta {
+ struct rb_node node;
+ struct ion_handle *handle;
+ struct rb_root iommu_maps;
+ struct kref ref;
+ struct sg_table *table;
+ unsigned long size;
+ struct mutex lock;
+};
+
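+/*
+ * Global tree of ion_iommu_meta nodes, keyed by ion handle. Lookups,
+ * insertions and removals are protected by msm_iommu_map_mutex.
+ */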
+static struct rb_root iommu_root;
+DEFINE_MUTEX(msm_iommu_map_mutex);
+
+static void ion_iommu_meta_add(struct ion_iommu_meta *meta)
+{
+ struct rb_root *root = &iommu_root;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_meta *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_meta, node);
+
+ if (meta->handle < entry->handle) {
+ p = &(*p)->rb_left;
+ } else if (meta->handle > entry->handle) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: handle %p already exists\n", __func__,
+ entry->handle);
+ BUG();
+ }
+ }
+
+ rb_link_node(&meta->node, parent, p);
+ rb_insert_color(&meta->node, root);
+}
+
+
+static struct ion_iommu_meta *ion_iommu_meta_lookup(struct ion_handle *handle)
+{
+ struct rb_root *root = &iommu_root;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_meta *entry = NULL;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_meta, node);
+
+ if (handle < entry->handle)
+ p = &(*p)->rb_left;
+ else if (handle > entry->handle)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ return NULL;
+}
+
+
+
+static void ion_iommu_add(struct ion_iommu_meta *meta,
+ struct ion_iommu_map *iommu)
+{
+ struct rb_node **p = &meta->iommu_maps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_map *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_map, node);
+
+ if (iommu->key < entry->key) {
+ p = &(*p)->rb_left;
+ } else if (iommu->key > entry->key) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: handle %p already has mapping for domain %d and partition %d\n",
+ __func__,
+ meta->handle,
+ iommu_map_domain(iommu),
+ iommu_map_partition(iommu));
+ BUG();
+ }
+ }
+
+ rb_link_node(&iommu->node, parent, p);
+ rb_insert_color(&iommu->node, &meta->iommu_maps);
+}
+
+
+static struct ion_iommu_map *ion_iommu_lookup(
+ struct ion_iommu_meta *meta,
+ unsigned int domain_no,
+ unsigned int partition_no)
+{
+ struct rb_node **p = &meta->iommu_maps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ion_iommu_map *entry;
+ uint64_t key = domain_no;
+ key = key << 32 | partition_no;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct ion_iommu_map, node);
+
+ if (key < entry->key)
+ p = &(*p)->rb_left;
+ else if (key > entry->key)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ return NULL;
+}
+
+static int ion_iommu_map_iommu(struct ion_iommu_meta *meta,
+ struct ion_iommu_map *data,
+ unsigned int domain_num,
+ unsigned int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long flags)
+{
+ struct iommu_domain *domain;
+ int ret = 0;
+ unsigned long extra, size;
+ struct sg_table *table;
+ int prot = IOMMU_WRITE | IOMMU_READ;
+
+
+ size = meta->size;
+ data->mapped_size = iova_length;
+ extra = iova_length - size;
+ table = meta->table;
+
+ /* Use the biggest alignment to allow bigger IOMMU mappings.
+ * Use the first entry since the first entry will always be the
+ * biggest entry. To take advantage of bigger mapping sizes both the
+ * VA and PA addresses have to be aligned to the biggest size.
+ */
+ if (table->sgl->length > align)
+ align = table->sgl->length;
+
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ data->mapped_size, align,
+ &data->iova_addr);
+
+ if (ret)
+ goto out;
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ ret = iommu_map_range(domain, data->iova_addr,
+ table->sgl,
+ size, prot);
+ if (ret) {
+ pr_err("%s: could not map %lx in domain %p\n",
+ __func__, data->iova_addr, domain);
+ goto out1;
+ }
+
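+	/* Map the padding requested beyond the buffer size, if any. */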
+ if (extra) {
+ unsigned long extra_iova_addr = data->iova_addr + size;
+ unsigned long phys_addr = sg_phys(table->sgl);
+ ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
+ extra, SZ_4K, prot);
+ if (ret)
+ goto out2;
+ }
+ return ret;
+
+out2:
+ iommu_unmap_range(domain, data->iova_addr, size);
+out1:
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ size);
+
+out:
+
+ return ret;
+}
+
+static void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
+{
+ unsigned int domain_num;
+ unsigned int partition_num;
+ struct iommu_domain *domain;
+
+ BUG_ON(!msm_use_iommu());
+
+ domain_num = iommu_map_domain(data);
+ partition_num = iommu_map_partition(data);
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+ return;
+ }
+
+ iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
+ msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+ data->mapped_size);
+
+ return;
+}
+
+
+
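+/*
+ * Create an ion_iommu_map for the given domain/partition, perform the
+ * actual iommu mapping and insert it into the metadata's tree of mappings.
+ */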
+static struct ion_iommu_map *__ion_iommu_map(struct ion_iommu_meta *meta,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long flags,
+ unsigned long *iova)
+{
+ struct ion_iommu_map *data;
+ int ret;
+
+ data = kmalloc(sizeof(*data), GFP_ATOMIC);
+
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ iommu_map_domain(data) = domain_num;
+ iommu_map_partition(data) = partition_num;
+
+ ret = ion_iommu_map_iommu(meta, data,
+ domain_num,
+ partition_num,
+ align,
+ iova_length,
+ flags);
+
+ if (ret)
+ goto out;
+
+ kref_init(&data->ref);
+ *iova = data->iova_addr;
+ data->meta = meta;
+
+ ion_iommu_add(meta, data);
+
+ return data;
+
+out:
+ kfree(data);
+ return ERR_PTR(ret);
+}
+
+static struct ion_iommu_meta *ion_iommu_meta_create(struct ion_handle *handle,
+ struct sg_table *table,
+ unsigned long size)
+{
+ struct ion_iommu_meta *meta;
+
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+
+ if (!meta)
+ return ERR_PTR(-ENOMEM);
+
+ meta->handle = handle;
+ meta->table = table;
+ meta->size = size;
+ kref_init(&meta->ref);
+ mutex_init(&meta->lock);
+ ion_iommu_meta_add(meta);
+
+ return meta;
+}
+
+static void ion_iommu_meta_destroy(struct kref *kref)
+{
+ struct ion_iommu_meta *meta = container_of(kref, struct ion_iommu_meta,
+ ref);
+
+
+ rb_erase(&meta->node, &iommu_root);
+ kfree(meta);
+}
+
+static void ion_iommu_meta_put(struct ion_iommu_meta *meta)
+{
+ /*
+ * Need to lock here to prevent race against map/unmap
+ */
+ mutex_lock(&msm_iommu_map_mutex);
+ kref_put(&meta->ref, ion_iommu_meta_destroy);
+ mutex_unlock(&msm_iommu_map_mutex);
+}
+
+int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags, unsigned long iommu_flags)
+{
+ struct ion_iommu_map *iommu_map;
+ struct ion_iommu_meta *iommu_meta = NULL;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int ret = 0;
+ int i;
+ unsigned long size = 0;
+
+ if (IS_ERR_OR_NULL(client)) {
+ pr_err("%s: client pointer is invalid\n", __func__);
+ return -EINVAL;
+ }
+ if (IS_ERR_OR_NULL(handle)) {
+ pr_err("%s: handle pointer is invalid\n", __func__);
+ return -EINVAL;
+ }
+
+ table = ion_sg_table(client, handle);
+
+ if (IS_ERR_OR_NULL(table))
+ return PTR_ERR(table);
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ size += sg_dma_len(sg);
+
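+	/*
+	 * Without an IOMMU, just hand back the buffer's physical address
+	 * and size; no iommu mapping or metadata is needed.
+	 */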
+	if (!msm_use_iommu()) {
+		unsigned long pa = sg_dma_address(table->sgl);
+		if (pa == 0)
+			pa = sg_phys(table->sgl);
+		*iova = pa;
+		*buffer_size = size;
+		return 0;
+	}
+ /*
+ * If clients don't want a custom iova length, just use whatever
+ * the buffer size is
+ */
+ if (!iova_length)
+ iova_length = size;
+
+ if (size > iova_length) {
+ pr_debug("%s: iova length %lx is not at least buffer size %lx\n",
+ __func__, iova_length, size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (size & ~PAGE_MASK) {
+ pr_debug("%s: buffer size %lx is not aligned to %lx", __func__,
+ size, PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (iova_length & ~PAGE_MASK) {
+ pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
+ iova_length, PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+
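+	/*
+	 * Find or create the per-handle metadata under the global mutex.
+	 * Each successful mapping holds a reference on it that is dropped
+	 * in ion_unmap_iommu().
+	 */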
+ mutex_lock(&msm_iommu_map_mutex);
+ iommu_meta = ion_iommu_meta_lookup(handle);
+
+	if (!iommu_meta)
+		iommu_meta = ion_iommu_meta_create(handle, table, size);
+	else
+		kref_get(&iommu_meta->ref);
+
+	mutex_unlock(&msm_iommu_map_mutex);
+
+	if (IS_ERR(iommu_meta))
+		return PTR_ERR(iommu_meta);
+
+ iommu_map = ion_iommu_lookup(iommu_meta, domain_num, partition_num);
+ if (!iommu_map) {
+ iommu_map = __ion_iommu_map(iommu_meta, domain_num,
+ partition_num, align, iova_length,
+ flags, iova);
+ if (!IS_ERR_OR_NULL(iommu_map)) {
+ iommu_map->flags = iommu_flags;
+ ret = 0;
+ } else {
+ ret = PTR_ERR(iommu_map);
+ goto out;
+ }
+ } else {
+ if (iommu_map->flags != iommu_flags) {
+ pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
+ __func__, handle,
+ iommu_map->flags, iommu_flags);
+ ret = -EINVAL;
+ goto out;
+ } else if (iommu_map->mapped_size != iova_length) {
+ pr_err("%s: handle %p is already mapped with length %x, trying to map with length %lx\n",
+ __func__, handle, iommu_map->mapped_size,
+ iova_length);
+ ret = -EINVAL;
+ goto out;
+ } else {
+ kref_get(&iommu_map->ref);
+ *iova = iommu_map->iova_addr;
+ }
+ }
+ *buffer_size = size;
+ return ret;
+
+out:
+
+ ion_iommu_meta_put(iommu_meta);
+ return ret;
+}
+EXPORT_SYMBOL(ion_map_iommu);
+
+
+static void ion_iommu_map_release(struct kref *kref)
+{
+ struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
+ ref);
+ struct ion_iommu_meta *meta = map->meta;
+
+ rb_erase(&map->node, &meta->iommu_maps);
+ ion_iommu_heap_unmap_iommu(map);
+ kfree(map);
+}
+
+void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
+ int domain_num, int partition_num)
+{
+ struct ion_iommu_map *iommu_map;
+ struct ion_iommu_meta *meta;
+
+ if (IS_ERR_OR_NULL(client)) {
+ pr_err("%s: client pointer is invalid\n", __func__);
+ return;
+ }
+ if (IS_ERR_OR_NULL(handle)) {
+ pr_err("%s: handle pointer is invalid\n", __func__);
+ return;
+ }
+
+
+ mutex_lock(&msm_iommu_map_mutex);
+ meta = ion_iommu_meta_lookup(handle);
+ if (!meta) {
+ WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
+ domain_num, partition_num, handle);
+		mutex_unlock(&msm_iommu_map_mutex);
+		goto out;
+	}
+ mutex_unlock(&msm_iommu_map_mutex);
+
+ mutex_lock(&meta->lock);
+ iommu_map = ion_iommu_lookup(meta, domain_num, partition_num);
+
+ if (!iommu_map) {
+ WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
+ domain_num, partition_num, handle);
+ mutex_unlock(&meta->lock);
+ goto out;
+ }
+
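+	/*
+	 * Drop the reference for this domain/partition mapping; the final
+	 * put unmaps it and frees its iova range via ion_iommu_map_release().
+	 */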
+ kref_put(&iommu_map->ref, ion_iommu_map_release);
+ mutex_unlock(&meta->lock);
+
+ ion_iommu_meta_put(meta);
+
+out:
+ return;
+}
+EXPORT_SYMBOL(ion_unmap_iommu);
diff --git a/drivers/gpu/ion/msm_ion_priv.h b/drivers/gpu/ion/msm_ion_priv.h
index 2729ce2..2de4e8a 100644
--- a/drivers/gpu/ion/msm_ion_priv.h
+++ b/drivers/gpu/ion/msm_ion_priv.h
@@ -26,42 +26,6 @@
#include <linux/iommu.h>
#include <linux/seq_file.h>
-enum {
- DI_PARTITION_NUM = 0,
- DI_DOMAIN_NUM = 1,
- DI_MAX,
-};
-
-/**
- * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
- * @iova_addr - iommu virtual address
- * @node - rb node to exist in the buffer's tree of iommu mappings
- * @domain_info - contains the partition number and domain number
- * domain_info[1] = domain number
- * domain_info[0] = partition number
- * @ref - for reference counting this mapping
- * @mapped_size - size of the iova space mapped
- * (may not be the same as the buffer size)
- * @flags - iommu domain/partition specific flags.
- *
- * Represents a mapping of one ion buffer to a particular iommu domain
- * and address range. There may exist other mappings of this buffer in
- * different domains or address ranges. All mappings will have the same
- * cacheability and security.
- */
-struct ion_iommu_map {
- unsigned long iova_addr;
- struct rb_node node;
- union {
- int domain_info[DI_MAX];
- uint64_t key;
- };
- struct ion_buffer *buffer;
- struct kref ref;
- int mapped_size;
- unsigned long flags;
-};
-
/**
* struct mem_map_data - represents information about the memory map for a heap
* @node: rb node used to store in the tree of mem_map_data
@@ -79,9 +43,6 @@
const char *client_name;
};
-#define iommu_map_domain(__m) ((__m)->domain_info[1])
-#define iommu_map_partition(__m) ((__m)->domain_info[0])
-
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
void ion_iommu_heap_destroy(struct ion_heap *);