gpu: ion: Map everything into IOMMU with 64K pages
Mapping with 4K pages causes performance problems in the IOMMU, so use
64K pages instead. However, the minimum allocation from ION is 4K, so
rather than mapping buffers on demand, map the full heap into the IOMMU
when the first mapping request arrives; later requests simply return
the cached base iova plus the buffer's offset into the heap. Only unmap
everything from the IOMMU when the last buffer is freed.
CRs-fixed: 348606
Change-Id: Ic1793f5caaff2f69bf1fb7e8c8b3bd03378131b8
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index c26b8b9..c103fa8 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -84,6 +84,7 @@
#include <mach/msm_rtb.h>
#include <mach/msm_cache_dump.h>
#include <mach/scm.h>
+#include <mach/iommu_domains.h>
#include <linux/fmem.h>
@@ -146,7 +147,7 @@
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
#define MSM_PMEM_KERNEL_EBI1_SIZE 0x65000
#ifdef CONFIG_MSM_IOMMU
-#define MSM_ION_MM_SIZE 0x3800000
+#define MSM_ION_MM_SIZE 0x3800000 /* Must be a multiple of 64K */
#define MSM_ION_SF_SIZE 0x0
#define MSM_ION_QSECOM_SIZE 0x780000 /* (7.5MB) */
#define MSM_ION_HEAP_NUM 7
@@ -349,10 +350,12 @@
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
static struct ion_cp_heap_pdata cp_mm_msm8960_ion_pdata = {
.permission_type = IPT_TYPE_MM_CARVEOUT,
- .align = PAGE_SIZE,
+ .align = SZ_64K,
.reusable = FMEM_ENABLED,
.mem_is_fmem = FMEM_ENABLED,
.fixed_position = FIXED_MIDDLE,
+ .iommu_map_all = 1,
+ .iommu_2x_map_domain = VIDEO_DOMAIN,
};
static struct ion_cp_heap_pdata cp_mfc_msm8960_ion_pdata = {
@@ -545,6 +548,7 @@
msm8960_fmem_pdata.size = 0;
msm8960_fmem_pdata.reserved_size_low = 0;
msm8960_fmem_pdata.reserved_size_high = 0;
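+	/* Default alignment; raised below to match an adjacent heap. */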
+ msm8960_fmem_pdata.align = PAGE_SIZE;
fixed_low_size = 0;
fixed_middle_size = 0;
fixed_high_size = 0;
@@ -571,8 +575,11 @@
}
for (i = 0; i < msm8960_ion_pdata.nr; ++i) {
- const struct ion_platform_heap *heap =
+ struct ion_platform_heap *heap =
&(msm8960_ion_pdata.heaps[i]);
+ int align = SZ_4K;
+ int iommu_map_all = 0;
+ int adjacent_mem_id = INVALID_HEAP_ID;
if (heap->extra_data) {
int fixed_position = NOT_FIXED;
@@ -584,17 +591,35 @@
heap->extra_data)->mem_is_fmem;
fixed_position = ((struct ion_cp_heap_pdata *)
heap->extra_data)->fixed_position;
+ align = ((struct ion_cp_heap_pdata *)
+ heap->extra_data)->align;
+ iommu_map_all =
+ ((struct ion_cp_heap_pdata *)
+ heap->extra_data)->iommu_map_all;
break;
case ION_HEAP_TYPE_CARVEOUT:
mem_is_fmem = ((struct ion_co_heap_pdata *)
heap->extra_data)->mem_is_fmem;
fixed_position = ((struct ion_co_heap_pdata *)
heap->extra_data)->fixed_position;
+ adjacent_mem_id = ((struct ion_co_heap_pdata *)
+ heap->extra_data)->adjacent_mem_id;
break;
default:
break;
}
+ if (iommu_map_all) {
+ if (heap->size & (SZ_64K-1)) {
+ heap->size = ALIGN(heap->size, SZ_64K);
+				pr_info("Heap %s not aligned to 64K. Adjusting size to %zx\n",
+					heap->name, heap->size);
+ }
+ }
+
+ if (mem_is_fmem && adjacent_mem_id != INVALID_HEAP_ID)
+ msm8960_fmem_pdata.align = align;
+
if (fixed_position != NOT_FIXED)
fixed_size += heap->size;
else
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 477b17d..13866e7 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -85,6 +85,7 @@
#include <mach/rpm-regulator.h>
#include <mach/restart.h>
#include <mach/board-msm8660.h>
+#include <mach/iommu_domains.h>
#include "devices.h"
#include "devices-msm8x60.h"
@@ -2646,7 +2647,7 @@
#define MSM_ION_SF_SIZE 0x4000000 /* 64MB */
#define MSM_ION_CAMERA_SIZE MSM_PMEM_ADSP_SIZE
#define MSM_ION_MM_FW_SIZE 0x200000 /* (2MB) */
-#define MSM_ION_MM_SIZE 0x3600000 /* (54MB) */
+#define MSM_ION_MM_SIZE 0x3600000 /* (54MB) Must be a multiple of 64K */
#define MSM_ION_MFC_SIZE SZ_8K
#ifdef CONFIG_FB_MSM_OVERLAY1_WRITEBACK
#define MSM_ION_WB_SIZE 0xC00000 /* 12MB */
@@ -5262,10 +5263,12 @@
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
static struct ion_cp_heap_pdata cp_mm_ion_pdata = {
.permission_type = IPT_TYPE_MM_CARVEOUT,
- .align = PAGE_SIZE,
+ .align = SZ_64K,
.request_region = request_smi_region,
.release_region = release_smi_region,
.setup_region = setup_smi_region,
+ .iommu_map_all = 1,
+ .iommu_2x_map_domain = VIDEO_DOMAIN,
};
static struct ion_cp_heap_pdata cp_mfc_ion_pdata = {
@@ -5432,6 +5435,23 @@
}
}
+ /* Verify size of heap is a multiple of 64K */
+ for (i = 0; i < ion_pdata.nr; i++) {
+ struct ion_platform_heap *heap = &(ion_pdata.heaps[i]);
+
+ if (heap->extra_data && heap->type == ION_HEAP_TYPE_CP) {
+ int map_all = ((struct ion_cp_heap_pdata *)
+ heap->extra_data)->iommu_map_all;
+
+			if (map_all && (heap->size & (SZ_64K-1))) {
+				heap->size = ALIGN(heap->size, SZ_64K);
+				pr_err("Heap %s size is not a multiple of 64K. Adjusting size to %zx\n",
+					heap->name, heap->size);
+			}
+ }
+ }
+
msm8x60_reserve_table[MEMTYPE_EBI1].size += msm_ion_sf_size;
msm8x60_reserve_table[MEMTYPE_SMI].size += MSM_ION_MM_FW_SIZE;
msm8x60_reserve_table[MEMTYPE_SMI].size += MSM_ION_MM_SIZE;
diff --git a/arch/arm/mach-msm/include/mach/iommu_domains.h b/arch/arm/mach-msm/include/mach/iommu_domains.h
index e232d00..dfb100c 100644
--- a/arch/arm/mach-msm/include/mach/iommu_domains.h
+++ b/arch/arm/mach-msm/include/mach/iommu_domains.h
@@ -75,8 +75,14 @@
extern int msm_iommu_map_extra(struct iommu_domain *domain,
unsigned long start_iova,
unsigned long size,
+ unsigned long page_size,
int cached);
+extern void msm_iommu_unmap_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ unsigned long size,
+ unsigned long page_size);
+
extern int msm_iommu_map_contig_buffer(unsigned long phys,
unsigned int domain_no,
unsigned int partition_no,
@@ -115,12 +121,18 @@
static inline int msm_iommu_map_extra(struct iommu_domain *domain,
unsigned long start_iova,
unsigned long size,
+ unsigned long page_size,
int cached)
{
return -ENODEV;
-
}
+static inline void msm_iommu_unmap_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ unsigned long size,
+ unsigned long page_size)
+{
+}
static inline int msm_iommu_map_contig_buffer(unsigned long phys,
unsigned int domain_no,
@@ -141,7 +153,6 @@
{
return;
}
-
#endif
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_memtypes.h b/arch/arm/mach-msm/include/mach/msm_memtypes.h
index 6f9bed1..7afb38d 100644
--- a/arch/arm/mach-msm/include/mach/msm_memtypes.h
+++ b/arch/arm/mach-msm/include/mach/msm_memtypes.h
@@ -66,5 +66,5 @@
extern struct reserve_info *reserve_info;
-unsigned long __init reserve_memory_for_fmem(unsigned long);
+unsigned long __init reserve_memory_for_fmem(unsigned long size,
+						unsigned long align);
#endif
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index 4afb9bd..34c16d1 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -22,8 +22,8 @@
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
-/* dummy 4k for overmapping */
-char iommu_dummy[2*PAGE_SIZE-4];
+/* Dummy buffer for overmapping: big enough to hold a 64K-aligned 64K region. */
+char iommu_dummy[2*SZ_64K-4];
struct msm_iommu_domain_state {
struct msm_iommu_domain *domains;
@@ -35,34 +35,50 @@
int msm_iommu_map_extra(struct iommu_domain *domain,
unsigned long start_iova,
unsigned long size,
+ unsigned long page_size,
int cached)
{
- int i, ret = 0;
- struct scatterlist *sglist;
- unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
- struct page *dummy_page = phys_to_page(
- PFN_ALIGN(virt_to_phys(iommu_dummy)));
+ int i, ret_value = 0;
+ unsigned long order = get_order(page_size);
+ unsigned long aligned_size = ALIGN(size, page_size);
+ unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
+ unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
+ unsigned long temp_iova = start_iova;
- sglist = vmalloc(sizeof(*sglist) * nrpages);
- if (!sglist) {
- ret = -ENOMEM;
- goto err1;
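+	/*
+	 * Back the whole iova range with the same dummy physical
+	 * region: the range only needs valid mappings so that stray
+	 * device accesses (e.g. prefetch) hit harmless memory.
+	 */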
+ for (i = 0; i < nrpages; i++) {
+ int ret = iommu_map(domain, temp_iova, phy_addr, order, cached);
+ if (ret) {
+			pr_err("%s: could not map %lx in domain %p, error: %d\n",
+				__func__, temp_iova, domain, ret);
+ ret_value = -EAGAIN;
+ goto out;
+ }
+ temp_iova += page_size;
}
-
- sg_init_table(sglist, nrpages);
-
- for (i = 0; i < nrpages; i++)
- sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);
-
- ret = iommu_map_range(domain, start_iova, sglist, size, cached);
- if (ret) {
- pr_err("%s: could not map extra %lx in domain %p\n",
- __func__, start_iova, domain);
+ return ret_value;
+out:
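+	/* Roll back the mappings created before the failure. */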
+ for (; i > 0; --i) {
+ temp_iova -= page_size;
+		iommu_unmap(domain, temp_iova, order);
}
+ return ret_value;
+}
- vfree(sglist);
-err1:
- return ret;
+void msm_iommu_unmap_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ unsigned long size,
+ unsigned long page_size)
+{
+ int i;
+ unsigned long order = get_order(page_size);
+ unsigned long aligned_size = ALIGN(size, page_size);
+ unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
+ unsigned long temp_iova = start_iova;
+
+ for (i = 0; i < nrpages; ++i) {
+ iommu_unmap(domain, temp_iova, order);
+ temp_iova += page_size;
+ }
}
static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
diff --git a/arch/arm/mach-msm/subsystem_map.c b/arch/arm/mach-msm/subsystem_map.c
index 96c3d49..4a1285b 100644
--- a/arch/arm/mach-msm/subsystem_map.c
+++ b/arch/arm/mach-msm/subsystem_map.c
@@ -407,7 +407,7 @@
if (flags & MSM_SUBSYSTEM_MAP_IOMMU_2X)
msm_iommu_map_extra
- (d, temp_va, length,
+ (d, temp_va, length, SZ_4K,
(IOMMU_READ | IOMMU_WRITE));
}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index c554e56..ca2380b 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -321,7 +321,8 @@
if (extra) {
unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, prot);
+ ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
+ SZ_4K, prot);
if (ret)
goto out2;
}
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 4c10c64..d889c4d 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -60,8 +60,12 @@
* kernel space (un-cached).
* @umap_count: the total number of times this heap has been mapped in
* user space.
+ * @iommu_iova: saved iova when mapping full heap at once.
+ * @iommu_partition: partition used to map full heap.
* @reusable: indicates if the memory should be reused via fmem.
* @reserved_vrange: reserved virtual address range for use with fmem
+ * @iommu_map_all: Indicates whether we should map whole heap into IOMMU.
+ * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
*/
struct ion_cp_heap {
struct ion_heap heap;
@@ -80,8 +84,13 @@
unsigned long kmap_cached_count;
unsigned long kmap_uncached_count;
unsigned long umap_count;
+ unsigned long iommu_iova[MAX_DOMAINS];
+ unsigned long iommu_partition[MAX_DOMAINS];
int reusable;
void *reserved_vrange;
+ int iommu_map_all;
+ int iommu_2x_map_domain;
};
enum {
@@ -247,6 +256,30 @@
return offset;
}
+static void iommu_unmap_all(unsigned long domain_num,
+ struct ion_cp_heap *cp_heap)
+{
+ unsigned long left_to_unmap = cp_heap->total_size;
+ unsigned long order = get_order(SZ_64K);
+ unsigned long page_size = SZ_64K;
+
+ struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
+ if (domain) {
+ unsigned long temp_iova = cp_heap->iommu_iova[domain_num];
+
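+		/* Tear down the 64K mappings covering the whole heap. */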
+ while (left_to_unmap) {
+ iommu_unmap(domain, temp_iova, order);
+ temp_iova += page_size;
+ left_to_unmap -= page_size;
+ }
+ if (domain_num == cp_heap->iommu_2x_map_domain)
+ msm_iommu_unmap_extra(domain, temp_iova,
+ cp_heap->total_size, SZ_64K);
+ } else {
+ pr_err("Unable to get IOMMU domain %lu\n", domain_num);
+ }
+}
+
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size)
{
@@ -265,6 +298,26 @@
pr_err("%s: unable to transition heap to T-state\n",
__func__);
}
+
+ /* Unmap everything if we previously mapped the whole heap at once. */
+ if (!cp_heap->allocated_bytes) {
+ unsigned int i;
+ for (i = 0; i < MAX_DOMAINS; ++i) {
+ if (cp_heap->iommu_iova[i]) {
+ unsigned long vaddr_len = cp_heap->total_size;
+
+ if (i == cp_heap->iommu_2x_map_domain)
+ vaddr_len <<= 1;
+ iommu_unmap_all(i, cp_heap);
+
+ msm_free_iova_address(cp_heap->iommu_iova[i], i,
+ cp_heap->iommu_partition[i],
+ vaddr_len);
+ }
+ cp_heap->iommu_iova[i] = 0;
+ cp_heap->iommu_partition[i] = 0;
+ }
+ }
mutex_unlock(&cp_heap->lock);
}
@@ -566,6 +619,75 @@
return ret_value;
}
+static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
+ int partition, unsigned long prot, unsigned long align)
+{
+ unsigned long left_to_map = cp_heap->total_size;
+ unsigned long order = get_order(SZ_64K);
+ unsigned long page_size = SZ_64K;
+ int ret_value = 0;
+ unsigned long virt_addr_len = cp_heap->total_size;
+ struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
+
+ /* If we are mapping into the video domain we need to map twice the
+ * size of the heap to account for prefetch issue in video core.
+ */
+ if (domain_num == cp_heap->iommu_2x_map_domain)
+ virt_addr_len <<= 1;
+
+ if (cp_heap->total_size & (SZ_64K-1)) {
+ pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
+ ret_value = -EINVAL;
+ }
+ if (cp_heap->base & (SZ_64K-1)) {
+ pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
+ ret_value = -EINVAL;
+ }
+	if (ret_value)
+		goto out;
+
+	if (domain) {
+ unsigned long temp_phys = cp_heap->base;
+ unsigned long temp_iova =
+ msm_allocate_iova_address(domain_num, partition,
+ virt_addr_len, SZ_64K);
+ if (!temp_iova) {
+ pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
+ __func__, domain_num, partition);
+ ret_value = -ENOMEM;
+ goto out;
+ }
+ cp_heap->iommu_iova[domain_num] = temp_iova;
+
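+		/* Map the heap linearly into the iova range, 64K at a time. */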
+ while (left_to_map) {
+ int ret = iommu_map(domain, temp_iova, temp_phys,
+ order, prot);
+ if (ret) {
+ pr_err("%s: could not map %lx in domain %p, error: %d\n",
+ __func__, temp_iova, domain, ret);
+ ret_value = -EAGAIN;
+ goto free_iova;
+ }
+ temp_iova += page_size;
+ temp_phys += page_size;
+ left_to_map -= page_size;
+ }
+ if (domain_num == cp_heap->iommu_2x_map_domain)
+ ret_value = msm_iommu_map_extra(domain, temp_iova,
+ cp_heap->total_size,
+ SZ_64K, prot);
+ if (ret_value)
+ goto free_iova;
+ } else {
+ pr_err("Unable to get IOMMU domain %lu\n", domain_num);
+ ret_value = -ENOMEM;
+ }
+ goto out;
+
+free_iova:
+ msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
+ partition, virt_addr_len);
+out:
+ return ret_value;
+}
+
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
struct ion_iommu_map *data,
unsigned int domain_num,
@@ -578,6 +700,8 @@
int ret = 0;
unsigned long extra;
struct scatterlist *sglist = 0;
+ struct ion_cp_heap *cp_heap =
+ container_of(buffer->heap, struct ion_cp_heap, heap);
int prot = IOMMU_WRITE | IOMMU_READ;
prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
@@ -588,6 +712,33 @@
return 0;
}
+ if (cp_heap->iommu_iova[domain_num]) {
+ /* Already mapped. */
+ unsigned long offset = buffer->priv_phys - cp_heap->base;
+ data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
+ return 0;
+ } else if (cp_heap->iommu_map_all) {
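+		/* First mapping request: map the entire heap at once. */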
+		ret = iommu_map_all(domain_num, cp_heap, partition_num,
+					prot, align);
+ if (!ret) {
+ unsigned long offset =
+ buffer->priv_phys - cp_heap->base;
+ data->iova_addr =
+ cp_heap->iommu_iova[domain_num] + offset;
+ cp_heap->iommu_partition[domain_num] = partition_num;
+			/*
+			 * Clear the delayed-unmap flag so that we don't
+			 * interfere with that feature; the unmap is already
+			 * delayed until the whole heap is freed.
+			 */
+ data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
+ return 0;
+ } else {
+ cp_heap->iommu_iova[domain_num] = 0;
+ cp_heap->iommu_partition[domain_num] = 0;
+ return ret;
+ }
+ }
+
extra = iova_length - buffer->size;
data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
@@ -620,7 +771,8 @@
if (extra) {
unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, prot);
+ ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
+ SZ_4K, prot);
if (ret)
goto out2;
}
@@ -643,11 +795,20 @@
unsigned int domain_num;
unsigned int partition_num;
struct iommu_domain *domain;
+ struct ion_cp_heap *cp_heap =
+ container_of(data->buffer->heap, struct ion_cp_heap, heap);
if (!msm_use_iommu())
return;
+
domain_num = iommu_map_domain(data);
+
+	/*
+	 * If we mapped the whole heap at once, wait to unmap until
+	 * everything has been freed.
+	 */
+ if (cp_heap->iommu_iova[domain_num])
+ return;
+
partition_num = iommu_map_partition(data);
domain = msm_get_iommu_domain(domain_num);
@@ -728,7 +889,13 @@
cp_heap->request_region = extra_data->request_region;
if (extra_data->release_region)
cp_heap->release_region = extra_data->release_region;
+		cp_heap->iommu_map_all = extra_data->iommu_map_all;
+		cp_heap->iommu_2x_map_domain = extra_data->iommu_2x_map_domain;
}
+
return &cp_heap->heap;
destroy_pool:
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 6951603..312ca42 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -215,7 +215,8 @@
if (extra) {
unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, prot);
+ ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
+ prot);
if (ret)
goto out2;
}
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index 7e859ce..ed9ae27 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -282,7 +282,8 @@
extra_iova_addr = data->iova_addr + buffer->size;
if (extra) {
- ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, prot);
+ ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
+ prot);
if (ret)
goto out2;
}
@@ -490,7 +491,8 @@
if (extra) {
unsigned long extra_iova_addr = data->iova_addr + buffer->size;
- ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, prot);
+ ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
+ prot);
if (ret)
goto out2;
}
diff --git a/drivers/staging/qcache/fmem.c b/drivers/staging/qcache/fmem.c
index 7a95880..1f78799 100644
--- a/drivers/staging/qcache/fmem.c
+++ b/drivers/staging/qcache/fmem.c
@@ -62,7 +62,7 @@
if (!pdata->phys)
pdata->phys = allocate_contiguous_ebi_nomap(pdata->size,
- PAGE_SIZE);
+ pdata->align);
#ifdef CONFIG_MEMORY_HOTPLUG
fmem_section_start = pdata->phys >> PA_SECTION_SHIFT;
diff --git a/include/linux/fmem.h b/include/linux/fmem.h
index 44b7005..e4fa82c 100644
--- a/include/linux/fmem.h
+++ b/include/linux/fmem.h
@@ -22,6 +22,7 @@
unsigned long size;
unsigned long reserved_size_low;
unsigned long reserved_size_high;
+ unsigned long align;
};
struct fmem_data {
diff --git a/include/linux/ion.h b/include/linux/ion.h
index ae49bce..b5495a0 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -177,6 +177,8 @@
* or not.
* @fixed_position If nonzero, position in the fixed area.
* @virt_addr: Virtual address used when using fmem.
+ * @iommu_map_all: Indicates whether we should map whole heap into IOMMU.
+ * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
* @request_region: function to be called when the number of allocations
* goes from 0 -> 1
* @release_region: function to be called when the number of allocations
@@ -192,6 +194,8 @@
int reusable;
int mem_is_fmem;
enum ion_fixed_position fixed_position;
+ int iommu_map_all;
+ int iommu_2x_map_domain;
ion_virt_addr_t *virt_addr;
int (*request_region)(void *);
int (*release_region)(void *);