gpu: ion: Map everything into IOMMU with 64K pages.
Due to performance issues with 4K pages in the IOMMU, use 64K pages
instead. However, the minimum allocation from ION is 4K, so rather
than mapping buffers on demand, map the full heap into the IOMMU when
the first mapping request arrives, and only unmap everything from the
IOMMU when the last buffer is freed.
CRs-fixed: 348606
Change-Id: Ic1793f5caaff2f69bf1fb7e8c8b3bd03378131b8
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
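---
The helpers below only do the bulk map/unmap; the ION heap side ties
them to buffer lifetime with a per-heap reference count, so the first
mapping request maps the whole heap with 64K pages and freeing the
last buffer tears the mapping down. A minimal sketch of that pattern
(the struct and function names here are illustrative assumptions, not
the actual ION code):

/* Illustrative only: msm_ion_heap and heap_iommu_get/put are assumed names. */
#include <linux/mutex.h>
#include <linux/iommu.h>
#include <asm/sizes.h>              /* SZ_64K */
#include <mach/iommu_domains.h>     /* msm_iommu_map_extra()/_unmap_extra() */

struct msm_ion_heap {
    struct iommu_domain *domain;
    unsigned long base_iova;        /* IOMMU virtual base of the heap */
    unsigned long heap_size;
    unsigned int iommu_map_cnt;     /* buffers currently holding the mapping */
    struct mutex lock;
};

static int heap_iommu_get(struct msm_ion_heap *heap, int cached)
{
    int ret = 0;

    mutex_lock(&heap->lock);
    if (heap->iommu_map_cnt++ == 0) {
        /* first user: map the entire heap with 64K pages */
        ret = msm_iommu_map_extra(heap->domain, heap->base_iova,
                                  heap->heap_size, SZ_64K, cached);
        if (ret)
            heap->iommu_map_cnt--;
    }
    mutex_unlock(&heap->lock);
    return ret;
}

static void heap_iommu_put(struct msm_ion_heap *heap)
{
    mutex_lock(&heap->lock);
    if (--heap->iommu_map_cnt == 0) {
        /* last buffer freed: tear down the heap-wide mapping */
        msm_iommu_unmap_extra(heap->domain, heap->base_iova,
                              heap->heap_size, SZ_64K);
    }
    mutex_unlock(&heap->lock);
}

A failed bulk map leaves the count balanced, matching the rollback
done inside msm_iommu_map_extra() itself.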
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index 4afb9bd..34c16d1 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -22,8 +22,8 @@
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
-/* dummy 4k for overmapping */
-char iommu_dummy[2*PAGE_SIZE-4];
+/* dummy region for overmapping: big enough to hold a 64K-aligned 64K window */
+char iommu_dummy[2*SZ_64K-4];
struct msm_iommu_domain_state {
struct msm_iommu_domain *domains;
@@ -35,34 +35,53 @@
int msm_iommu_map_extra(struct iommu_domain *domain,
unsigned long start_iova,
unsigned long size,
+ unsigned long page_size,
int cached)
{
- int i, ret = 0;
- struct scatterlist *sglist;
- unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
- struct page *dummy_page = phys_to_page(
- PFN_ALIGN(virt_to_phys(iommu_dummy)));
+ int i, ret_value = 0;
+ unsigned long order = get_order(page_size);
+ unsigned long aligned_size = ALIGN(size, page_size);
+ unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
+ unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
+ unsigned long temp_iova = start_iova;
- sglist = vmalloc(sizeof(*sglist) * nrpages);
- if (!sglist) {
- ret = -ENOMEM;
- goto err1;
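+ /* point every page_size chunk of the range at the dummy region */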
+ for (i = 0; i < nrpages; i++) {
+ int ret = iommu_map(domain, temp_iova, phy_addr, order, cached);
+ if (ret) {
+ pr_err("%s: could not map %lx in domain %p, error: %d\n",
+ __func__, start_iova, domain, ret);
+ ret_value = -EAGAIN;
+ goto out;
+ }
+ temp_iova += page_size;
}
-
- sg_init_table(sglist, nrpages);
-
- for (i = 0; i < nrpages; i++)
- sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);
-
- ret = iommu_map_range(domain, start_iova, sglist, size, cached);
- if (ret) {
- pr_err("%s: could not map extra %lx in domain %p\n",
- __func__, start_iova, domain);
+ return ret_value;
+out:
+ /* unwind: unmap everything that was mapped before the failure */
+ for (; i > 0; --i) {
+ temp_iova -= page_size;
+ iommu_unmap(domain, temp_iova, order);
}
+ return ret_value;
+}
- vfree(sglist);
-err1:
- return ret;
+void msm_iommu_unmap_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ unsigned long size,
+ unsigned long page_size)
+{
+ int i;
+ unsigned long order = get_order(page_size);
+ unsigned long aligned_size = ALIGN(size, page_size);
+ unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
+ unsigned long temp_iova = start_iova;
+
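+ /* remove the page_size mappings laid down by msm_iommu_map_extra() */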
+ for (i = 0; i < nrpages; ++i) {
+ iommu_unmap(domain, temp_iova, order);
+ temp_iova += page_size;
+ }
}
static int msm_iommu_map_iova_phys(struct iommu_domain *domain,