gpu: ion: Move MFC heap to a different address
Video hardware has the following requirements for
ION heaps:
1. MM heap must be at a higher address than the FW heap.
2. MFC heap must be at a higher address than the FW heap.
3. MM heap must be adjacent to the FW heap
(there cannot be another heap between the FW and MM heaps).
4. MM and MFC heaps cannot be more than 256MB away
from the base address of the FW heap.
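Taken together, these constraints amount to a layout check along the
following lines (illustrative sketch only; the helper and its arguments
are hypothetical and not part of this patch):

  /* Sketch: check the four requirements above for given heap bases. */
  static bool video_heap_layout_ok(unsigned long fw_base, unsigned long fw_size,
                                   unsigned long mm_base, unsigned long mfc_base)
  {
          return mm_base > fw_base &&                     /* req 1 */
                 mfc_base > fw_base &&                     /* req 2 */
                 mm_base == fw_base + fw_size &&           /* req 3: adjacent */
                 mm_base - fw_base <= SZ_256M &&           /* req 4 */
                 mfc_base - fw_base <= SZ_256M;
  }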
The MM heap is configured as a reusable heap (FMEM heap), and FMEM
is carved out at a much higher address than the other heaps, which
breaks the above requirements. To satisfy them, the MFC heap,
together with the FW heap, must be carved out at the same
location as the MM heap.
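With mem_is_fmem set on the MM, FW and MFC heaps, the reservation
loop below ends up accounting for them roughly as follows (sketch
only; the *_size variables are illustrative, not real symbols):

  /*
   * All three heaps contribute to the fmem region size; the
   * non-reusable FW heap (which has an adjacent_mem_id) goes into
   * reserved_size_low and the non-reusable MFC heap into
   * reserved_size_high. Only the MM portion remains reusable.
   */
  fmem_pdata.size               = mm_size + fw_size + mfc_size;
  fmem_pdata.reserved_size_low  = fw_size;
  fmem_pdata.reserved_size_high = mfc_size;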
Change-Id: Ie0acb4b267d4307190ea3cd9ff23c710ffa1a538
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index b207db9..d380566 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -339,28 +339,34 @@
return MEMTYPE_EBI1;
}
+#define FMEM_ENABLED 1
+
#ifdef CONFIG_ION_MSM
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
static struct ion_cp_heap_pdata cp_mm_ion_pdata = {
.permission_type = IPT_TYPE_MM_CARVEOUT,
.align = PAGE_SIZE,
- .reusable = 1,
+ .reusable = FMEM_ENABLED,
+ .mem_is_fmem = FMEM_ENABLED,
};
static struct ion_cp_heap_pdata cp_mfc_ion_pdata = {
.permission_type = IPT_TYPE_MFC_SHAREDMEM,
.align = PAGE_SIZE,
.reusable = 0,
+ .mem_is_fmem = FMEM_ENABLED,
};
static struct ion_co_heap_pdata co_ion_pdata = {
.adjacent_mem_id = INVALID_HEAP_ID,
.align = PAGE_SIZE,
+ .mem_is_fmem = 0,
};
static struct ion_co_heap_pdata fw_co_ion_pdata = {
.adjacent_mem_id = ION_CP_MM_HEAP_ID,
.align = SZ_128K,
+ .mem_is_fmem = FMEM_ENABLED,
};
#endif
@@ -477,23 +483,17 @@
msm8960_reserve_table[mem_type].size += size;
}
-static __init const struct ion_platform_heap *find_ion_heap(int heap_id)
-{
- unsigned int i;
- for (i = 0; i < ion_pdata.nr; ++i) {
- const struct ion_platform_heap *heap = &(ion_pdata.heaps[i]);
- if (heap->id == heap_id)
- return (const struct ion_platform_heap *) heap;
- }
- return 0;
-}
-
/**
* Reserve memory for ION and calculate amount of reusable memory for fmem.
* We only reserve memory for heaps that are not reusable. However, we only
* support one reusable heap at the moment so we ignore the reusable flag for
* other than the first heap with reusable flag set. Also handle special case
- * for adjacent heap when the adjacent heap is adjacent to a reusable heap.
+ * for the video heaps (MM, FW, and MFC). Video requires the MM and MFC heaps
+ * to be at a higher address than FW and no more than 256MB away from the base
+ * address of the FW heap. This means that if MM is reusable, the other two
+ * heaps must be allocated in the same region as FW. This is handled by the
+ * mem_is_fmem flag in the platform data. In addition, the MM heap must be
+ * adjacent to the FW heap for content protection purposes.
*/
static void __init reserve_ion_memory(void)
{
@@ -503,7 +503,8 @@
adjust_mem_for_liquid();
fmem_pdata.size = 0;
- fmem_pdata.reserved_size = 0;
+ fmem_pdata.reserved_size_low = 0;
+ fmem_pdata.reserved_size_high = 0;
/* We only support 1 reusable heap. Check if more than one heap
* is specified as reusable and set as non-reusable if found.
@@ -528,7 +529,7 @@
for (i = 0; i < ion_pdata.nr; ++i) {
int reusable = 0;
int adjacent_heap_id = INVALID_HEAP_ID;
- int adj_reusable = 0;
+ int mem_is_fmem = 0;
const struct ion_platform_heap *heap = &(ion_pdata.heaps[i]);
if (heap->extra_data) {
@@ -536,31 +537,30 @@
case ION_HEAP_TYPE_CP:
reusable = ((struct ion_cp_heap_pdata *)
heap->extra_data)->reusable;
+ mem_is_fmem = ((struct ion_cp_heap_pdata *)
+ heap->extra_data)->mem_is_fmem;
break;
case ION_HEAP_TYPE_CARVEOUT:
adjacent_heap_id = ((struct ion_co_heap_pdata *)
heap->extra_data)->adjacent_mem_id;
+ mem_is_fmem = ((struct ion_co_heap_pdata *)
+ heap->extra_data)->mem_is_fmem;
break;
default:
break;
}
}
- if (adjacent_heap_id != INVALID_HEAP_ID) {
- const struct ion_platform_heap *adj_heap =
- find_ion_heap(adjacent_heap_id);
- if (adj_heap) {
- adj_reusable = ((struct ion_cp_heap_pdata *)
- adj_heap->extra_data)->reusable;
- if (adj_reusable)
- fmem_pdata.reserved_size += heap->size;
- }
+ if (mem_is_fmem && !reusable) {
+ if (adjacent_heap_id != INVALID_HEAP_ID)
+ fmem_pdata.reserved_size_low += heap->size;
+ else
+ fmem_pdata.reserved_size_high += heap->size;
}
-
- if (!reusable && !adj_reusable)
- reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
- else
+ if (mem_is_fmem)
fmem_pdata.size += heap->size;
+ else
+ reserve_mem_for_ion(MEMTYPE_EBI1, heap->size);
}
#endif
}