Initial Contribution
msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142
Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 0074b8d..e862f63 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -588,6 +588,9 @@
config CPU_TLB_V7
bool
+config EMULATE_DOMAIN_MANAGER_V7
+ bool
+
config VERIFY_PERMISSION_FAULT
bool
endif
@@ -738,6 +741,19 @@
If your SoC is configured to have a different size, define the value
here with proper conditions.
+config CPU_CACHE_ERR_REPORT
+ bool "Report errors in the L1 and L2 caches"
+ depends on ARCH_MSM_SCORPION
+ default n
+ help
+ The Scorpion processor supports reporting L2 errors, L1 icache parity
+ errors, and L1 dcache parity errors as imprecise external aborts. If
+ this option is not enabled, these errors go unreported and the
+ resulting data corruption happens silently.
+
+ Say Y here to have errors in the L1 and L2 caches reported as
+ imprecise data aborts.
+
config CPU_DCACHE_WRITETHROUGH
bool "Force write through D-cache"
depends on (CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_FA526) && !CPU_DCACHE_DISABLE
@@ -821,7 +837,7 @@
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
REALVIEW_EB_A9MP || SOC_IMX35 || SOC_IMX31 || MACH_REALVIEW_PBX || \
ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \
- ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE
+ ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE || ARCH_MSM7X27
default y
select OUTER_CACHE
select OUTER_CACHE_SYNC
@@ -889,3 +905,30 @@
help
This option allows the use of custom mandatory barriers
included via the mach/barriers.h file.
+
+config VCM_MM
+ bool
+
+config VCM
+ bool "Virtual Contiguous Memory (VCM) Layer"
+ depends on MMU
+ select GENERIC_ALLOCATOR
+ select VCM_MM
+ default n
+ help
+ Virtual Contiguous Memory layer. This layer is intended to
+ replace PMEM.
+
+ If you don't know what this is, say N here.
+
+config STRICT_MEMORY_RWX
+ bool "restrict kernel memory permissions as much as possible"
+ default n
+ help
+ If this is set, kernel text will be made RX, kernel data and stack
+ RW, rodata R (otherwise all of the kernel 1-to-1 mapping is
+ made RWX).
+ The tradeoff is that several sections are padded to
+ 1M boundaries (because their permissions are different and
+ splitting the 1M pages into 4K ones causes TLB performance
+ problems), wasting memory.
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index bca7e61..8046026 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -92,6 +92,7 @@
obj-$(CONFIG_CPU_V6) += proc-v6.o
obj-$(CONFIG_CPU_V6K) += proc-v6.o
obj-$(CONFIG_CPU_V7) += proc-v7.o
+obj-$(CONFIG_EMULATE_DOMAIN_MANAGER_V7) += emulate_domain_manager-v7.o
AFLAGS_proc-v6.o :=-Wa,-march=armv6
AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
@@ -100,3 +101,5 @@
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o
+obj-$(CONFIG_VCM) += vcm.o vcm_alloc.o
+obj-$(CONFIG_VCM_MM) += vcm_mm.o
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 1fa6f71..ad953fe 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -168,7 +168,7 @@
* - start - virtual start address
* - end - virtual end address
*/
-fa_dma_inv_range:
+ENTRY(fa_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry
@@ -191,7 +191,7 @@
* - start - virtual start address
* - end - virtual end address
*/
-fa_dma_clean_range:
+ENTRY(fa_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -253,5 +253,7 @@
.long fa_flush_kern_dcache_area
.long fa_dma_map_area
.long fa_dma_unmap_area
+ .long fa_dma_inv_range
+ .long fa_dma_clean_range
.long fa_dma_flush_range
.size fa_cache_fns, . - fa_cache_fns
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 44c0867..25a4260 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -2,6 +2,7 @@
* arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
*
* Copyright (C) 2007 ARM Limited
+ * Copyright (c) 2009, 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,7 @@
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
+static uint32_t aux_ctrl_save;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
static uint32_t l2x0_size;
@@ -111,13 +113,9 @@
}
#endif
-static void l2x0_cache_sync(void)
+void l2x0_cache_sync(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&l2x0_lock, flags);
cache_sync();
- spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void __l2x0_flush_all(void)
@@ -204,6 +202,27 @@
spin_unlock_irqrestore(&l2x0_lock, flags);
}
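+/*
+ * Lock-free variants of the range operations: each line is maintained
+ * with an individual register write and l2x0_lock is never taken.
+ * l2x0_init() installs these for L210 (and unrecognised) parts.
+ */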
+static void l2x0_inv_range_atomic(unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+
+ if (start & (CACHE_LINE_SIZE - 1)) {
+ start &= ~(CACHE_LINE_SIZE - 1);
+ writel_relaxed(start, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
+ start += CACHE_LINE_SIZE;
+ }
+
+ if (end & (CACHE_LINE_SIZE - 1)) {
+ end &= ~(CACHE_LINE_SIZE - 1);
+ writel_relaxed(end, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
+ }
+
+ for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
+ writel_relaxed(addr, l2x0_base + L2X0_INV_LINE_PA);
+
+ mb();
+}
+
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
@@ -234,6 +253,17 @@
spin_unlock_irqrestore(&l2x0_lock, flags);
}
+static void l2x0_clean_range_atomic(unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+
+ start &= ~(CACHE_LINE_SIZE - 1);
+ for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
+ writel_relaxed(addr, l2x0_base + L2X0_CLEAN_LINE_PA);
+
+ mb();
+}
+
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
@@ -266,6 +296,17 @@
spin_unlock_irqrestore(&l2x0_lock, flags);
}
+void l2x0_flush_range_atomic(unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+
+ start &= ~(CACHE_LINE_SIZE - 1);
+ for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
+ writel_relaxed(addr, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
+
+ mb();
+}
+
static void l2x0_disable(void)
{
unsigned long flags;
@@ -279,15 +320,19 @@
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
- __u32 aux;
+ __u32 aux, bits;
__u32 cache_id;
__u32 way_size = 0;
int ways;
const char *type;
l2x0_base = base;
-
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+
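+ /*
+ * Disable the controller before programming it; the aux control
+ * register should only be written while the L2 is disabled.
+ */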
+ bits = readl_relaxed(l2x0_base + L2X0_CTRL);
+ bits &= ~0x01; /* clear bit 0 */
+ writel_relaxed(bits, l2x0_base + L2X0_CTRL);
+
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
@@ -312,7 +357,7 @@
type = "L2x0 series";
break;
}
-
+ writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
l2x0_way_mask = (1 << ways) - 1;
/*
@@ -322,32 +367,76 @@
way_size = 1 << (way_size + 3);
l2x0_size = ways * way_size * SZ_1K;
- /*
- * Check if l2x0 controller is already enabled.
- * If you are booting from non-secure mode
- * accessing the below registers will fault.
- */
- if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+ l2x0_inv_all();
- /* l2x0 controller is disabled */
- writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
+ /* enable L2X0 */
+ bits = readl_relaxed(l2x0_base + L2X0_CTRL);
+ bits |= 0x01; /* set bit 0 */
+ writel_relaxed(bits, l2x0_base + L2X0_CTRL);
- l2x0_inv_all();
-
- /* enable L2X0 */
- writel_relaxed(1, l2x0_base + L2X0_CTRL);
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+ case L2X0_CACHE_ID_PART_L220:
+ outer_cache.inv_range = l2x0_inv_range;
+ outer_cache.clean_range = l2x0_clean_range;
+ outer_cache.flush_range = l2x0_flush_range;
+ printk(KERN_INFO "L220 cache controller enabled\n");
+ break;
+ case L2X0_CACHE_ID_PART_L310:
+ outer_cache.inv_range = l2x0_inv_range;
+ outer_cache.clean_range = l2x0_clean_range;
+ outer_cache.flush_range = l2x0_flush_range;
+ printk(KERN_INFO "L310 cache controller enabled\n");
+ break;
+ case L2X0_CACHE_ID_PART_L210:
+ default:
+ outer_cache.inv_range = l2x0_inv_range_atomic;
+ outer_cache.clean_range = l2x0_clean_range_atomic;
+ outer_cache.flush_range = l2x0_flush_range_atomic;
+ printk(KERN_INFO "L210 cache controller enabled\n");
+ break;
}
- outer_cache.inv_range = l2x0_inv_range;
- outer_cache.clean_range = l2x0_clean_range;
- outer_cache.flush_range = l2x0_flush_range;
outer_cache.sync = l2x0_cache_sync;
+
outer_cache.flush_all = l2x0_flush_all;
outer_cache.inv_all = l2x0_inv_all;
outer_cache.disable = l2x0_disable;
outer_cache.set_debug = l2x0_set_debug;
+ mb();
printk(KERN_INFO "%s cache controller enabled\n", type);
printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
ways, cache_id, aux, l2x0_size);
}
+
+void l2x0_suspend(void)
+{
+ /* Save aux control register value */
+ aux_ctrl_save = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+ /* Flush all cache */
+ l2x0_flush_all();
+ /* Disable the cache */
+ writel_relaxed(0, l2x0_base + L2X0_CTRL);
+
+ /* Memory barrier */
+ dmb();
+}
+
+void l2x0_resume(int collapsed)
+{
+ if (collapsed) {
+ /* Disable the cache */
+ writel_relaxed(0, l2x0_base + L2X0_CTRL);
+
+ /* Restore aux control register value */
+ writel_relaxed(aux_ctrl_save, l2x0_base + L2X0_AUX_CTRL);
+
+ /* Invalidate the cache */
+ l2x0_inv_all();
+ }
+
+ /* Enable the cache */
+ writel_relaxed(1, l2x0_base + L2X0_CTRL);
+
+ mb();
+}
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 2e2bc40..64f739e 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -93,6 +93,20 @@
/* FALLTHROUGH */
/*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_dma_inv_range)
+ /* FALLTHROUGH */
+
+/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -103,6 +117,17 @@
ENTRY(v3_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
+ /* FALLTHROUGH */
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_dma_clean_range)
mov pc, lr
/*
@@ -113,7 +138,7 @@
*/
ENTRY(v3_dma_unmap_area)
teq r2, #DMA_TO_DEVICE
- bne v3_dma_flush_range
+ bne v3_dma_inv_range
/* FALLTHROUGH */
/*
@@ -140,5 +165,7 @@
.long v3_flush_kern_dcache_area
.long v3_dma_map_area
.long v3_dma_unmap_area
+ .long v3_dma_inv_range
+ .long v3_dma_clean_range
.long v3_dma_flush_range
.size v3_cache_fns, . - v3_cache_fns
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index a8fefb5..7824cf6 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -103,6 +103,20 @@
/* FALLTHROUGH */
/*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4_dma_inv_range)
+ /* FALLTHROUGH */
+
+/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -115,6 +129,17 @@
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
#endif
+ /* FALLTHROUGH */
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4_dma_clean_range)
mov pc, lr
/*
@@ -125,7 +150,7 @@
*/
ENTRY(v4_dma_unmap_area)
teq r2, #DMA_TO_DEVICE
- bne v4_dma_flush_range
+ bne v4_dma_inv_range
/* FALLTHROUGH */
/*
@@ -152,5 +177,7 @@
.long v4_flush_kern_dcache_area
.long v4_dma_map_area
.long v4_dma_unmap_area
+ .long v4_dma_inv_range
+ .long v4_dma_clean_range
.long v4_dma_flush_range
.size v4_cache_fns, . - v4_cache_fns
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index f40c696..acbdaeb 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -184,7 +184,7 @@
* - start - virtual start address
* - end - virtual end address
*/
-v4wb_dma_inv_range:
+ENTRY(v4wb_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -205,7 +205,7 @@
* - start - virtual start address
* - end - virtual end address
*/
-v4wb_dma_clean_range:
+ENTRY(v4wb_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -264,5 +264,7 @@
.long v4wb_flush_kern_dcache_area
.long v4wb_dma_map_area
.long v4wb_dma_unmap_area
+ .long v4wb_dma_inv_range
+ .long v4wb_dma_clean_range
.long v4wb_dma_flush_range
.size v4wb_cache_fns, . - v4wb_cache_fns
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index a7b276d..5f60392 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -153,12 +153,23 @@
* - start - virtual start address
* - end - virtual end address
*/
-v4wt_dma_inv_range:
+ENTRY(v4wt_dma_inv_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
+ /* FALLTHROUGH */
+
+/*
+ * dma_clean_range(start, end)
+ *
+ * Clean the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v4wt_dma_clean_range)
mov pc, lr
/*
@@ -208,5 +219,7 @@
.long v4wt_flush_kern_dcache_area
.long v4wt_dma_map_area
.long v4wt_dma_unmap_area
+ .long v4wt_dma_inv_range
+ .long v4wt_dma_clean_range
.long v4wt_dma_flush_range
.size v4wt_cache_fns, . - v4wt_cache_fns
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 6b5441d..0a2528d 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -203,7 +203,7 @@
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v6_dma_inv_range:
+ENTRY(v6_dma_inv_range)
#ifdef CONFIG_DMA_CACHE_RWFO
ldrb r2, [r0] @ read for ownership
strb r2, [r0] @ write for ownership
@@ -248,7 +248,7 @@
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v6_dma_clean_range:
+ENTRY(v6_dma_clean_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_DMA_CACHE_RWFO
@@ -358,5 +358,7 @@
.long v6_flush_kern_dcache_area
.long v6_dma_map_area
.long v6_dma_unmap_area
+ .long v6_dma_inv_range
+ .long v6_dma_clean_range
.long v6_dma_flush_range
.size v6_cache_fns, . - v6_cache_fns
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index d32f02b..0db2092 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -242,7 +242,7 @@
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v7_dma_inv_range:
+ENTRY(v7_dma_inv_range)
dcache_line_size r2, r3
sub r3, r2, #1
tst r0, r3
@@ -266,7 +266,7 @@
* - start - virtual start address of region
* - end - virtual end address of region
*/
-v7_dma_clean_range:
+ENTRY(v7_dma_clean_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
@@ -336,5 +336,7 @@
.long v7_flush_kern_dcache_area
.long v7_dma_map_area
.long v7_dma_unmap_area
+ .long v7_dma_inv_range
+ .long v7_dma_clean_range
.long v7_dma_flush_range
.size v7_cache_fns, . - v7_cache_fns
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 82a093c..4fc0e3f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -429,18 +429,22 @@
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
enum dma_data_direction dir)
{
+#ifdef CONFIG_OUTER_CACHE
unsigned long paddr;
BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
+#endif
dmac_map_area(kaddr, size, dir);
+#ifdef CONFIG_OUTER_CACHE
paddr = __pa(kaddr);
if (dir == DMA_FROM_DEVICE) {
outer_inv_range(paddr, paddr + size);
} else {
outer_clean_range(paddr, paddr + size);
}
+#endif
/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);
@@ -448,6 +452,7 @@
void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
enum dma_data_direction dir)
{
+#ifdef CONFIG_OUTER_CACHE
BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
/* FIXME: non-speculating: not required */
@@ -456,7 +461,7 @@
unsigned long paddr = __pa(kaddr);
outer_inv_range(paddr, paddr + size);
}
-
+#endif
dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);
diff --git a/arch/arm/mm/emulate_domain_manager-v7.c b/arch/arm/mm/emulate_domain_manager-v7.c
new file mode 100644
index 0000000..3797e21
--- /dev/null
+++ b/arch/arm/mm/emulate_domain_manager-v7.c
@@ -0,0 +1,345 @@
+/*
+ * Basic software emulation of the domain manager feature of the ARM
+ * architecture. Assumes a single-processor ARMv7 chipset.
+ *
+ * Requires hooks so it can be alerted to any runtime change of the DACR
+ * or of the MMU context.
+ *
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sched.h>
+#include <asm/domain.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <linux/module.h>
+
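+/*
+ * The DACR holds 16 two-bit domain fields (client = 0b01, manager = 0b11);
+ * this mask picks out the high (manager) bit of every field.
+ */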
+#define DOMAIN_MANAGER_BITS (0xAAAAAAAA)
+
+#define DFSR_DOMAIN(dfsr) ((dfsr >> 4) & (16-1))
+
+#define FSR_PERMISSION_FAULT(fsr) ((fsr & 0x40D) == 0x00D)
+#define FSR_PERMISSION_SECT(fsr) ((fsr & 0x40F) == 0x00D)
+
+/* ARMv7 MMU HW Macros. Not conveniently defined elsewhere */
+#define MMU_TTB_ADDRESS(x) ((u32 *)(((u32)(x)) & ~((1 << 14) - 1)))
+#define MMU_PMD_INDEX(addr) (((u32)addr) >> SECTION_SHIFT)
+#define MMU_TABLE_ADDRESS(x) ((u32 *)((x) & ~((1 << 10) - 1)))
+#define MMU_TABLE_INDEX(x) ((((u32)x) >> 12) & (256 - 1))
+
+/* Convenience Macros */
+#define PMD_IS_VALID(x) (PMD_IS_TABLE(x) || PMD_IS_SECTION(x))
+#define PMD_IS_TABLE(x) ((x & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
+#define PMD_IS_SECTION(x) ((x & PMD_TYPE_MASK) == PMD_TYPE_SECT)
+#define PMD_IS_SUPERSECTION(x) \
+ (PMD_IS_SECTION(x) && ((x & PMD_SECT_SUPER) == PMD_SECT_SUPER))
+
+#define PMD_GET_DOMAIN(x) \
+ (PMD_IS_TABLE(x) || \
+ (PMD_IS_SECTION(x) && !PMD_IS_SUPERSECTION(x)) ? \
+ 0 : (x >> 5) & (16-1))
+
+#define PTE_IS_LARGE(x) ((x & PTE_TYPE_MASK) == PTE_TYPE_LARGE)
+
+
+/* Only DOMAIN_MMU_ENTRIES will be granted access simultaneously */
+#define DOMAIN_MMU_ENTRIES (8)
+
+#define LRU_INC(lru) ((lru + 1) >= DOMAIN_MMU_ENTRIES ? 0 : lru + 1)
+
+
+static DEFINE_SPINLOCK(edm_lock);
+
+static u32 edm_manager_bits;
+
+struct domain_entry_save {
+ u32 *mmu_entry;
+ u32 *addr;
+ u32 value;
+ u16 sect;
+ u16 size;
+};
+
+static struct domain_entry_save edm_save[DOMAIN_MMU_ENTRIES];
+
+static u32 edm_lru;
+
+
+/*
+ * Return virtual address of pmd (level 1) entry for addr
+ *
+ * This routine walks the ARMv7 page tables in HW.
+ */
+static inline u32 *__get_pmd_v7(u32 *addr)
+{
+ u32 *ttb;
+
+ __asm__ __volatile__(
+ "mrc p15, 0, %0, c2, c0, 0 @ ttbr0\n\t"
+ : "=r" (ttb)
+ :
+ );
+
+ return __va(MMU_TTB_ADDRESS(ttb) + MMU_PMD_INDEX(addr));
+}
+
+/*
+ * Return virtual address of pte (level 2) entry for addr
+ *
+ * This routine walks the ARMv7 page tables in HW.
+ */
+static inline u32 *__get_pte_v7(u32 *addr)
+{
+ u32 *pmd = __get_pmd_v7(addr);
+ u32 *table_pa = pmd && PMD_IS_TABLE(*pmd) ?
+ MMU_TABLE_ADDRESS(*pmd) : 0;
+ u32 *entry = table_pa ? __va(table_pa[MMU_TABLE_INDEX(addr)]) : 0;
+
+ return entry;
+}
+
+/*
+ * Invalidate the TLB for a given address for the current context
+ *
+ * TLB invalidation is required after manipulating access permissions so
+ * that the changes are observed.
+ */
+static inline void __tlb_invalidate(u32 *addr)
+{
+ __asm__ __volatile__(
+ "mrc p15, 0, %%r2, c13, c0, 1 @ contextidr\n\t"
+ "and %%r2, %%r2, #0xff @ asid\n\t"
+ "mov %%r3, %0, lsr #12 @ mva[31:12]\n\t"
+ "orr %%r2, %%r2, %%r3, lsl #12 @ tlb mva and asid\n\t"
+ "mcr p15, 0, %%r2, c8, c7, 1 @ utlbimva\n\t"
+ "isb"
+ :
+ : "r" (addr)
+ : "r2", "r3"
+ );
+}
+
+/*
+ * Set HW MMU entry and do required synchronization operations.
+ */
+static inline void __set_entry(u32 *entry, u32 *addr, u32 value, int size)
+{
+ int i;
+
+ if (!entry)
+ return;
+
+ entry = (u32 *)((u32) entry & ~(size * sizeof(u32) - 1));
+
+ for (i = 0; i < size; i++)
+ entry[i] = value;
+
+ __asm__ __volatile__(
+ "mcr p15, 0, %0, c7, c10, 1 @ flush entry\n\t"
+ "dsb\n\t"
+ "isb\n\t"
+ :
+ : "r" (entry)
+ );
+ __tlb_invalidate(addr);
+}
+
+/*
+ * Return the number of duplicate entries associated with entry value.
+ * Supersections and Large page table entries are replicated 16x.
+ */
+static inline int __entry_size(int sect, int value)
+{
+ u32 size;
+
+ if (sect)
+ size = PMD_IS_SUPERSECTION(value) ? 16 : 1;
+ else
+ size = PTE_IS_LARGE(value) ? 16 : 1;
+
+ return size;
+}
+
+/*
+ * Change entry permissions to emulate domain manager access
+ */
+static inline int __manager_perm(int sect, int value)
+{
+ u32 edm_value;
+
+ if (sect) {
+ edm_value = (value & ~(PMD_SECT_APX | PMD_SECT_XN)) |
+ (PMD_SECT_AP_READ | PMD_SECT_AP_WRITE);
+ } else {
+ edm_value = (value & ~(PTE_EXT_APX | PTE_EXT_XN)) |
+ (PTE_EXT_AP1 | PTE_EXT_AP0);
+ }
+ return edm_value;
+}
+
+/*
+ * Restore original HW MMU entry. Cancels domain manager access
+ */
+static inline void __restore_entry(int index)
+{
+ struct domain_entry_save *entry = &edm_save[index];
+ u32 edm_value;
+
+ if (!entry->mmu_entry)
+ return;
+
+ edm_value = __manager_perm(entry->sect, entry->value);
+
+ if (*entry->mmu_entry == edm_value)
+ __set_entry(entry->mmu_entry, entry->addr,
+ entry->value, entry->size);
+
+ entry->mmu_entry = 0;
+}
+
+/*
+ * Modify HW MMU entry to grant domain manager access for a given MMU entry.
+ * This adds full read, write, and exec access permissions.
+ */
+static inline void __set_manager(int sect, u32 *addr)
+{
+ u32 *entry = sect ? __get_pmd_v7(addr) : __get_pte_v7(addr);
+ u32 value;
+ u32 edm_value;
+ u16 size;
+
+ if (!entry)
+ return;
+
+ value = *entry;
+
+ size = __entry_size(sect, value);
+ edm_value = __manager_perm(sect, value);
+
+ __set_entry(entry, addr, edm_value, size);
+
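+ /*
+ * Save slots are reused round-robin: restore whatever entry
+ * currently occupies the next slot before overwriting it.
+ */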
+ __restore_entry(edm_lru);
+
+ edm_save[edm_lru].mmu_entry = entry;
+ edm_save[edm_lru].addr = addr;
+ edm_save[edm_lru].value = value;
+ edm_save[edm_lru].sect = sect;
+ edm_save[edm_lru].size = size;
+
+ edm_lru = LRU_INC(edm_lru);
+}
+
+/*
+ * Restore all original HW MMU entries saved by __set_manager().
+ */
+static inline void __restore(void)
+{
+ if (unlikely(edm_manager_bits)) {
+ u32 i;
+
+ for (i = 0; i < DOMAIN_MMU_ENTRIES; i++)
+ __restore_entry(i);
+ }
+}
+
+/*
+ * Common abort handler code
+ *
+ * If the domain manager bit had really been set in hardware, this
+ * permission fault would not have happened. Open up the access
+ * permissions to emulate that, saving the original settings so they can
+ * be restored later, and return 1 to pretend the fault never happened.
+ */
+static int __emulate_domain_manager_abort(u32 fsr, u32 far, int dabort)
+{
+ if (unlikely(FSR_PERMISSION_FAULT(fsr) && edm_manager_bits)) {
+ int domain = dabort ? DFSR_DOMAIN(fsr) : PMD_GET_DOMAIN(far);
+ if (edm_manager_bits & domain_val(domain, DOMAIN_MANAGER)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&edm_lock, flags);
+
+ __set_manager(FSR_PERMISSION_SECT(fsr), (u32 *) far);
+
+ spin_unlock_irqrestore(&edm_lock, flags);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Change domain setting.
+ *
+ * Lock and restore original contents. Extract and save manager bits. Set
+ * DACR, excluding manager bits.
+ */
+void emulate_domain_manager_set(u32 domain)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&edm_lock, flags);
+
+ if (edm_manager_bits != (domain & DOMAIN_MANAGER_BITS)) {
+ __restore();
+ edm_manager_bits = domain & DOMAIN_MANAGER_BITS;
+ }
+
+ __asm__ __volatile__(
+ "mcr p15, 0, %0, c3, c0, 0 @ set domain\n\t"
+ "isb"
+ :
+ : "r" (domain & ~DOMAIN_MANAGER_BITS)
+ );
+
+ spin_unlock_irqrestore(&edm_lock, flags);
+}
+EXPORT_SYMBOL_GPL(emulate_domain_manager_set);
+
+/*
+ * Switch thread context. Restore original contents.
+ */
+void emulate_domain_manager_switch_mm(unsigned long pgd_phys,
+ struct mm_struct *mm,
+ void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *))
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&edm_lock, flags);
+
+ __restore();
+
+ /* Call underlying kernel handler */
+ switch_mm(pgd_phys, mm);
+
+ spin_unlock_irqrestore(&edm_lock, flags);
+}
+EXPORT_SYMBOL_GPL(emulate_domain_manager_switch_mm);
+
+/*
+ * Kernel data_abort hook
+ */
+int emulate_domain_manager_data_abort(u32 dfsr, u32 dfar)
+{
+ return __emulate_domain_manager_abort(dfsr, dfar, 1);
+}
+EXPORT_SYMBOL_GPL(emulate_domain_manager_data_abort);
+
+/*
+ * Kernel prefetch_abort hook
+ */
+int emulate_domain_manager_prefetch_abort(u32 ifsr, u32 ifar)
+{
+ return __emulate_domain_manager_abort(ifsr, ifar, 0);
+}
+EXPORT_SYMBOL_GPL(emulate_domain_manager_prefetch_abort);
+
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bc0e1d8..b6850fe 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -23,6 +23,14 @@
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
+#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
+#include <asm/io.h>
+#include <mach/msm_iomap.h>
+#endif
+
+#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
+#include <asm/domain.h>
+#endif /* CONFIG_EMULATE_DOMAIN_MANAGER_V7 */
#include "fault.h"
@@ -484,6 +492,49 @@
return 1;
}
+#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
+#define __str(x) #x
+#define MRC(x, v1, v2, v4, v5, v6) do { \
+ unsigned int __##x; \
+ asm("mrc " __str(v1) ", " __str(v2) ", %0, " __str(v4) ", " \
+ __str(v5) ", " __str(v6) "\n" \
+ : "=r" (__##x)); \
+ pr_info("%s: %s = 0x%.8x\n", __func__, #x, __##x); \
+} while(0)
+
+#define MSM_TCSR_SPARE2 (MSM_TCSR_BASE + 0x60)
+
+#endif
+
+static int
+do_imprecise_ext(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
+ MRC(ADFSR, p15, 0, c5, c1, 0);
+ MRC(DFSR, p15, 0, c5, c0, 0);
+ MRC(ACTLR, p15, 0, c1, c0, 1);
+ MRC(EFSR, p15, 7, c15, c0, 1);
+ MRC(L2SR, p15, 3, c15, c1, 0);
+ MRC(L2CR0, p15, 3, c15, c0, 1);
+ MRC(L2CPUESR, p15, 3, c15, c1, 1);
+ MRC(L2CPUCR, p15, 3, c15, c0, 2);
+ MRC(SPESR, p15, 1, c9, c7, 0);
+ MRC(SPCR, p15, 0, c9, c7, 0);
+ MRC(DMACHSR, p15, 1, c11, c0, 0);
+ MRC(DMACHESR, p15, 1, c11, c0, 1);
+ MRC(DMACHCR, p15, 0, c11, c0, 2);
+
+ /* clear out EFSR and ADFSR after fault */
+ asm volatile ("mcr p15, 7, %0, c15, c0, 1\n\t"
+ "mcr p15, 0, %0, c5, c1, 0"
+ : : "r" (0));
+#endif
+#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
+ pr_info("%s: TCSR_SPARE2 = 0x%.8x\n", __func__, readl(MSM_TCSR_SPARE2));
+#endif
+ return 1;
+}
+
static struct fsr_info {
int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
int sig;
@@ -521,7 +572,7 @@
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "lock abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 21" },
- { do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */
+ { do_imprecise_ext, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 25" },
@@ -555,6 +606,11 @@
const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
struct siginfo info;
+#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
+ if (emulate_domain_manager_data_abort(fsr, addr))
+ return;
+#endif
+
if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
@@ -623,6 +679,11 @@
const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
struct siginfo info;
+#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
+ if (emulate_domain_manager_prefetch_abort(ifsr, addr))
+ return;
+#endif
+
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c19571c..9afb93a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -20,6 +20,9 @@
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
+#ifdef CONFIG_MEMORY_HOTPLUG
+#include <linux/memory_hotplug.h>
+#endif
#include <asm/mach-types.h>
#include <asm/prom.h>
@@ -362,6 +365,46 @@
memblock_dump_all();
}
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+int _early_pfn_valid(unsigned long pfn)
+{
+ struct meminfo *mi = &meminfo;
+ unsigned int left = 0, right = mi->nr_banks;
+
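+ /* Binary search for the bank holding pfn; mi->bank[] is sorted by
+ * start address.
+ */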
+ do {
+ unsigned int mid = (right + left) / 2;
+ struct membank *bank = &mi->bank[mid];
+
+ if (pfn < bank_pfn_start(bank))
+ right = mid;
+ else if (pfn >= bank_pfn_end(bank))
+ left = mid + 1;
+ else
+ return 1;
+ } while (left < right);
+ return 0;
+}
+EXPORT_SYMBOL(_early_pfn_valid);
+#endif
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static void map_reserved_memory(void)
+{
+ struct map_desc map;
+
+ map.pfn = (movable_reserved_start >> PAGE_SHIFT);
+ map.virtual = __phys_to_virt(movable_reserved_start);
+ map.length = movable_reserved_size;
+#ifdef CONFIG_STRICT_MEMORY_RWX
+ map.type = MT_MEMORY_RW;
+#else
+ map.type = MT_MEMORY;
+#endif
+
+ create_mapping(&map);
+}
+#endif
+
void __init bootmem_init(void)
{
unsigned long min, max_low, max_high;
@@ -390,6 +433,13 @@
*/
arm_bootmem_free(min, max_low, max_high);
+#ifdef CONFIG_MEMORY_HOTPLUG
+ if (movable_reserved_size) {
+ max_low = (movable_reserved_start + movable_reserved_size)
+ >> PAGE_SHIFT;
+ map_reserved_memory();
+ }
+#endif
high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
/*
@@ -715,6 +765,46 @@
"init");
}
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+ struct pglist_data *pgdata = NODE_DATA(nid);
+ struct zone *zone = pgdata->node_zones + ZONE_MOVABLE;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ int ret;
+
+ ret = __add_pages(nid, zone, start_pfn, nr_pages);
+ if (ret)
+ return ret;
+ return platform_physical_active_pages(start_pfn, nr_pages);
+}
+
+int arch_physical_active_memory(u64 start, u64 size)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+
+ return platform_physical_active_pages(start_pfn, nr_pages);
+}
+
+int arch_physical_remove_memory(u64 start, u64 size)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+
+ return platform_physical_remove_pages(start_pfn, nr_pages);
+}
+
+int arch_physical_low_power_memory(u64 start, u64 size)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+
+ return platform_physical_low_power_pages(start_pfn, nr_pages);
+}
+#endif
+
#ifdef CONFIG_BLK_DEV_INITRD
static int keep_initrd;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ab50627..17e7b0b 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -204,8 +204,12 @@
/*
* Don't allow RAM to be mapped - this causes problems with ARMv6+
*/
- if (WARN_ON(pfn_valid(pfn)))
- return NULL;
+ if (pfn_valid(pfn)) {
+ printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
+ KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
+ KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n");
+ WARN_ON(1);
+ }
type = get_mem_type(mtype);
if (!type)
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 5b3d7d5..cd9eb2c 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -23,5 +23,8 @@
#endif
+struct map_desc;
+
void __init bootmem_init(void);
void arm_mm_memblock_reserve(void);
+void __init create_mapping(struct map_desc *md);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 594d677..fb48d30 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -212,6 +212,12 @@
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
.domain = DOMAIN_IO,
},
+ [MT_DEVICE_STRONGLY_ORDERED] = { /* Guaranteed strongly ordered */
+ .prot_pte = PROT_PTE_DEVICE,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+ .domain = DOMAIN_IO,
+ },
[MT_DEVICE_WC] = { /* ioremap_wc */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
.prot_l1 = PMD_TYPE_TABLE,
@@ -250,6 +256,18 @@
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
+ [MT_MEMORY_R] = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
+ [MT_MEMORY_RW] = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
+ [MT_MEMORY_RX] = {
+ .prot_sect = PMD_TYPE_SECT,
+ .domain = DOMAIN_KERNEL,
+ },
[MT_ROM] = {
.prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
@@ -353,6 +371,8 @@
mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+ mem_types[MT_DEVICE_STRONGLY_ORDERED].prot_sect |=
+ PMD_SECT_XN;
}
if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
/*
@@ -426,6 +446,8 @@
* from SVC mode and no access from userspace.
*/
mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+ mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
@@ -444,6 +466,9 @@
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
}
}
@@ -483,6 +508,9 @@
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+ mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_ROM].prot_sect |= cp->pmd;
switch (cp->pmd) {
@@ -662,7 +690,7 @@
* offsets, and we take full advantage of sections and
* supersections.
*/
-static void __init create_mapping(struct map_desc *md)
+void __init create_mapping(struct map_desc *md)
{
unsigned long addr, length, end;
phys_addr_t phys;
@@ -727,7 +755,7 @@
create_mapping(io_desc + i);
}
-static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
+static void * __initdata vmalloc_min = (void *)(VMALLOC_END - CONFIG_VMALLOC_RESERVE);
/*
* vmalloc=size forces the vmalloc area to be exactly 'size'
@@ -1049,4 +1077,16 @@
empty_zero_page = virt_to_page(zero_page);
__flush_dcache_page(NULL, empty_zero_page);
+
+#if defined(CONFIG_ARCH_MSM7X27)
+ /*
+ * ensure that the strongly ordered page is mapped before the
+ * first call to write_to_strongly_ordered_memory. This page
+ * is necessary for the msm 7x27 due to hardware quirks. The
+ * map call is made here to ensure the bootmem call is made
+ * in the right window (after initialization, before full
+ * allocators are initialized)
+ */
+ map_page_strongly_ordered();
+#endif
}
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 4136846..dfeb57f 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -251,7 +251,7 @@
*
* (same as v4wb)
*/
-arm1026_dma_inv_range:
+ENTRY(arm1026_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -277,7 +277,7 @@
*
* (same as v4wb)
*/
-arm1026_dma_clean_range:
+ENTRY(arm1026_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -343,6 +343,8 @@
.long arm1026_flush_kern_dcache_area
.long arm1026_dma_map_area
.long arm1026_dma_unmap_area
+ .long arm1026_dma_inv_range
+ .long arm1026_dma_clean_range
.long arm1026_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 089c0b5..449d463 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/domain.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
@@ -61,7 +62,14 @@
*/
.align 5
ENTRY(cpu_v7_reset)
- mov pc, r0
+ mrc p15, 0, r1, c1, c0, 0 @ ctrl register
+ bic r1, r1, #0x0001 @ ...............m
+ mcr p15, 0, r1, c1, c0, 0 @ Turn off MMU
+ mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D,flush TLB
+ mcr p15, 0, ip, c7, c5, 6 @ flush BTC
+ dsb
+ isb
+ mov pc,r0
ENDPROC(cpu_v7_reset)
/*
@@ -101,6 +109,11 @@
*/
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
+#ifdef CONFIG_EMULATE_DOMAIN_MANAGER_V7
+ ldr r2, =cpu_v7_switch_mm_private
+ b emulate_domain_manager_switch_mm
+cpu_v7_switch_mm_private:
+#endif
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
@@ -280,9 +293,8 @@
* - cache type register is implemented
*/
__v7_ca9mp_setup:
-#ifdef CONFIG_SMP
- ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
- ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
+#if defined(CONFIG_SMP)
+ mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
@@ -375,6 +387,35 @@
ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP)
ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
mcr p15, 0, r8, c2, c0, 1 @ load TTB1
+#ifndef CONFIG_EMULATE_DOMAIN_MANAGER_V7
+ mov r10, #0x1f @ domains 0, 1 = manager
+ mcr p15, 0, r10, c3, c0, 0 @ load domain access register
+#endif
+#if defined(CONFIG_ARCH_MSM_SCORPION) && !defined(CONFIG_MSM_SMP)
+ mov r0, #0x33
+ mcr p15, 3, r0, c15, c0, 3 @ set L2CR1
+#endif
+#if defined (CONFIG_ARCH_MSM_SCORPION)
+ mrc p15, 0, r0, c1, c0, 1 @ read ACTLR
+#ifdef CONFIG_CPU_CACHE_ERR_REPORT
+ orr r0, r0, #0x37 @ turn on L1/L2 error reporting
+#else
+ bic r0, r0, #0x37
+#endif
+#if defined (CONFIG_ARCH_MSM_SCORPIONMP)
+ orr r0, r0, #0x1 << 24 @ optimal setting for Scorpion MP
+#endif
+#ifndef CONFIG_ARCH_MSM_KRAIT
+ mcr p15, 0, r0, c1, c0, 1 @ write ACTLR
+#endif
+#endif
+
+#if defined (CONFIG_ARCH_MSM_SCORPIONMP)
+ mrc p15, 3, r0, c15, c0, 2 @ optimal setting for Scorpion MP
+ orr r0, r0, #0x1 << 21
+ mcr p15, 3, r0, c15, c0, 2
+#endif
+
ldr r5, =PRRR @ PRRR
ldr r6, =NMRR @ NMRR
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 53cd5b4..8338c6e 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -38,11 +38,19 @@
dsb
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
+#ifdef CONFIG_ARCH_MSM8X60
+ mov r0, r0, lsl #PAGE_SHIFT
+#else
asid r3, r3 @ mask ASID
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
+#endif
mov r1, r1, lsl #PAGE_SHIFT
1:
+#ifdef CONFIG_ARCH_MSM8X60
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate by MVA, all ASIDs (shareable)
+#else
ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+#endif
ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
add r0, r0, #PAGE_SZ
@@ -70,7 +78,11 @@
mov r0, r0, lsl #PAGE_SHIFT
mov r1, r1, lsl #PAGE_SHIFT
1:
+#ifdef CONFIG_ARCH_MSM8X60
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate by MVA, all ASIDs (shareable)
+#else
ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+#endif
ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
add r0, r0, #PAGE_SZ
cmp r0, r1
diff --git a/arch/arm/mm/vcm.c b/arch/arm/mm/vcm.c
new file mode 100644
index 0000000..5c52a9c
--- /dev/null
+++ b/arch/arm/mm/vcm.c
@@ -0,0 +1,1830 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vcm_mm.h>
+#include <linux/vcm.h>
+#include <linux/vcm_alloc.h>
+#include <linux/vcm_types.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+
+#include <asm/page.h>
+#include <asm/sizes.h>
+
+#include <linux/iommu.h>
+
+/* alloc_vm_area */
+#include <linux/pfn.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/mach/map.h>
+
+#define ONE_TO_ONE_CHK 1
+
+#define vcm_err(a, ...) \
+ pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__)
+
+static unsigned int smmu_map_sizes[4] = {SZ_16M, SZ_1M, SZ_64K, SZ_4K};
+
+static phys_addr_t *bootmem_cont;
+static int cont_sz;
+static struct vcm *cont_vcm_id;
+static struct phys_chunk *cont_phys_chunk;
+
+DEFINE_SPINLOCK(vcmlock);
+
+/* Left in for now to keep the API compatible; this will disappear. */
+phys_addr_t vcm_get_dev_addr(struct res *res)
+{
+ if (!res) {
+ vcm_err("NULL RES");
+ return -EINVAL;
+ }
+ return res->dev_addr;
+}
+
+static int vcm_no_res(struct vcm *vcm)
+{
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ return list_empty(&vcm->res_head);
+fail:
+ return -EINVAL;
+}
+
+static int vcm_no_assoc(struct vcm *vcm)
+{
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ return list_empty(&vcm->assoc_head);
+fail:
+ return -EINVAL;
+}
+
+static int vcm_all_activated(struct vcm *vcm)
+{
+ struct avcm *avcm;
+
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ list_for_each_entry(avcm, &vcm->assoc_head, assoc_elm)
+ if (!avcm->is_active)
+ return 0;
+
+ return 1;
+fail:
+ return -EINVAL;
+}
+
+static void vcm_destroy_common(struct vcm *vcm)
+{
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ return;
+ }
+
+ memset(vcm, 0, sizeof(*vcm));
+ kfree(vcm);
+}
+
+static struct vcm *vcm_create_common(void)
+{
+ struct vcm *vcm = 0;
+
+ vcm = kzalloc(sizeof(*vcm), GFP_KERNEL);
+ if (!vcm) {
+ vcm_err("kzalloc(%i, GFP_KERNEL) ret 0\n",
+ sizeof(*vcm));
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&vcm->res_head);
+ INIT_LIST_HEAD(&vcm->assoc_head);
+
+ return vcm;
+
+fail:
+ return NULL;
+}
+
+
+static int vcm_create_pool(struct vcm *vcm, unsigned long start_addr,
+ size_t len)
+{
+ int ret = 0;
+
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ vcm->start_addr = start_addr;
+ vcm->len = len;
+
+ vcm->pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (!vcm->pool) {
+ vcm_err("gen_pool_create(%x, -1) ret 0\n", PAGE_SHIFT);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = gen_pool_add(vcm->pool, start_addr, len, -1);
+ if (ret) {
+ vcm_err("gen_pool_add(%p, %p, %i, -1) ret %i\n", vcm->pool,
+ (void *) start_addr, len, ret);
+ goto fail;
+ }
+
+ vcm->domain = iommu_domain_alloc();
+ if (!vcm->domain) {
+ vcm_err("Could not allocate domain\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+fail:
+ if (ret && vcm->pool)
+ gen_pool_destroy(vcm->pool);
+
+ return ret;
+}
+
+
+static struct vcm *vcm_create_flagged(int flag, unsigned long start_addr,
+ size_t len)
+{
+ int ret = 0;
+ struct vcm *vcm = 0;
+
+ vcm = vcm_create_common();
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ /* special one-to-one mapping case */
+ if ((flag & ONE_TO_ONE_CHK) &&
+ bootmem_cont &&
+ start_addr == (size_t) bootmem_cont &&
+ len == cont_sz) {
+ vcm->type = VCM_ONE_TO_ONE;
+ } else {
+ ret = vcm_create_pool(vcm, start_addr, len);
+ vcm->type = VCM_DEVICE;
+ }
+
+ if (ret) {
+ vcm_err("vcm_create_pool(%p, %p, %i) ret %i\n", vcm,
+ (void *) start_addr, len, ret);
+ goto fail2;
+ }
+
+ return vcm;
+
+fail2:
+ vcm_destroy_common(vcm);
+fail:
+ return NULL;
+}
+
+struct vcm *vcm_create(unsigned long start_addr, size_t len)
+{
+ unsigned long flags;
+ struct vcm *vcm;
+
+ spin_lock_irqsave(&vcmlock, flags);
+ vcm = vcm_create_flagged(ONE_TO_ONE_CHK, start_addr, len);
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return vcm;
+}
+
+
+static int ext_vcm_id_valid(size_t ext_vcm_id)
+{
+ return ((ext_vcm_id == VCM_PREBUILT_KERNEL) ||
+ (ext_vcm_id == VCM_PREBUILT_USER));
+}
+
+
+struct vcm *vcm_create_from_prebuilt(size_t ext_vcm_id)
+{
+ unsigned long flags;
+ struct vcm *vcm = 0;
+
+ spin_lock_irqsave(&vcmlock, flags);
+
+ if (!ext_vcm_id_valid(ext_vcm_id)) {
+ vcm_err("ext_vcm_id_valid(%i) ret 0\n", ext_vcm_id);
+ goto fail;
+ }
+
+ vcm = vcm_create_common();
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ if (ext_vcm_id == VCM_PREBUILT_KERNEL)
+ vcm->type = VCM_EXT_KERNEL;
+ else if (ext_vcm_id == VCM_PREBUILT_USER)
+ vcm->type = VCM_EXT_USER;
+ else {
+ vcm_err("UNREACHABLE ext_vcm_id is illegal\n");
+ goto fail_free;
+ }
+
+ /* TODO: set kernel and userspace start_addr and len, if this
+ * makes sense */
+
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return vcm;
+
+fail_free:
+ vcm_destroy_common(vcm);
+fail:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return NULL;
+}
+
+
+struct vcm *vcm_clone(struct vcm *vcm)
+{
+ return 0;
+}
+
+
+/* No lock needed, vcm->start_addr is never updated after creation */
+size_t vcm_get_start_addr(struct vcm *vcm)
+{
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ return 1;
+ }
+
+ return vcm->start_addr;
+}
+
+
+/* No lock needed, vcm->len is never updated after creation */
+size_t vcm_get_len(struct vcm *vcm)
+{
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ return 0;
+ }
+
+ return vcm->len;
+}
+
+
+static int vcm_free_common_rule(struct vcm *vcm)
+{
+ int ret;
+
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ ret = vcm_no_res(vcm);
+ if (!ret) {
+ vcm_err("vcm_no_res(%p) ret 0\n", vcm);
+ goto fail_busy;
+ }
+
+ if (ret == -EINVAL) {
+ vcm_err("vcm_no_res(%p) ret -EINVAL\n", vcm);
+ goto fail;
+ }
+
+ ret = vcm_no_assoc(vcm);
+ if (!ret) {
+ vcm_err("vcm_no_assoc(%p) ret 0\n", vcm);
+ goto fail_busy;
+ }
+
+ if (ret == -EINVAL) {
+ vcm_err("vcm_no_assoc(%p) ret -EINVAL\n", vcm);
+ goto fail;
+ }
+
+ return 0;
+
+fail_busy:
+ return -EBUSY;
+fail:
+ return -EINVAL;
+}
+
+
+static int vcm_free_pool_rule(struct vcm *vcm)
+{
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ /* A vcm always has a valid pool; if the pool is missing, what we
+ * were handed is probably not a valid vcm, so don't free it.
+ */
+ if (!vcm->pool) {
+ vcm_err("NULL vcm->pool\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return -EINVAL;
+}
+
+
+static void vcm_free_common(struct vcm *vcm)
+{
+ memset(vcm, 0, sizeof(*vcm));
+
+ kfree(vcm);
+}
+
+
+static int vcm_free_pool(struct vcm *vcm)
+{
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ gen_pool_destroy(vcm->pool);
+
+ return 0;
+
+fail:
+ return -EINVAL;
+}
+
+
+static int __vcm_free(struct vcm *vcm)
+{
+ int ret;
+
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ ret = vcm_free_common_rule(vcm);
+ if (ret != 0) {
+ vcm_err("vcm_free_common_rule(%p) ret %i\n", vcm, ret);
+ goto fail;
+ }
+
+ if (vcm->type == VCM_DEVICE) {
+ ret = vcm_free_pool_rule(vcm);
+ if (ret != 0) {
+ vcm_err("vcm_free_pool_rule(%p) ret %i\n",
+ (void *) vcm, ret);
+ goto fail;
+ }
+ if (vcm->domain)
+ iommu_domain_free(vcm->domain);
+
+ vcm->domain = NULL;
+ ret = vcm_free_pool(vcm);
+ if (ret != 0) {
+ vcm_err("vcm_free_pool(%p) ret %i", (void *) vcm, ret);
+ goto fail;
+ }
+ }
+
+ vcm_free_common(vcm);
+
+ return 0;
+
+fail:
+ return -EINVAL;
+}
+
+int vcm_free(struct vcm *vcm)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&vcmlock, flags);
+ ret = __vcm_free(vcm);
+ spin_unlock_irqrestore(&vcmlock, flags);
+
+ return ret;
+}
+
+
+static struct res *__vcm_reserve(struct vcm *vcm, size_t len, u32 attr)
+{
+ struct res *res = NULL;
+ int align_attr = 0, i = 0;
+
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ if (len == 0) {
+ vcm_err("len is 0\n");
+ goto fail;
+ }
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ vcm_err("kzalloc(%i, GFP_KERNEL) ret 0", sizeof(*res));
+ goto fail;
+ }
+
+ align_attr = (attr >> VCM_ALIGN_SHIFT) & VCM_ALIGN_MASK;
+
+ if (align_attr >= 32) {
+ vcm_err("Invalid alignment attribute: %d\n", align_attr);
+ goto fail2;
+ }
+
+ INIT_LIST_HEAD(&res->res_elm);
+ res->vcm = vcm;
+ res->len = len;
+ res->attr = attr;
+ res->alignment_req = smmu_map_sizes[ARRAY_SIZE(smmu_map_sizes) - 1];
+
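+ /*
+ * With no explicit alignment attribute, align on the largest SMMU
+ * mapping size that fits within len (defaulting to the smallest
+ * size set above).
+ */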
+ if (align_attr == 0) {
+ for (i = 0; i < ARRAY_SIZE(smmu_map_sizes); i++)
+ if (len / smmu_map_sizes[i]) {
+ res->alignment_req = smmu_map_sizes[i];
+ break;
+ }
+ } else
+ res->alignment_req = 1 << align_attr;
+
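+ /*
+ * Over-allocate by alignment_req so that a suitably aligned
+ * dev_addr can be carved out of the reservation below.
+ */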
+ res->aligned_len = res->alignment_req + len;
+
+ switch (vcm->type) {
+ case VCM_DEVICE:
+ /* should always be not zero */
+ if (!vcm->pool) {
+ vcm_err("NULL vcm->pool\n");
+ goto fail2;
+ }
+
+ res->ptr = gen_pool_alloc(vcm->pool, res->aligned_len);
+ if (!res->ptr) {
+ vcm_err("gen_pool_alloc(%p, %i) ret 0\n",
+ vcm->pool, res->aligned_len);
+ goto fail2;
+ }
+
+ /* Calculate alignment... this will all change anyway */
+ res->dev_addr = res->ptr +
+ (res->alignment_req -
+ (res->ptr & (res->alignment_req - 1)));
+
+ break;
+ case VCM_EXT_KERNEL:
+ res->vm_area = alloc_vm_area(res->aligned_len);
+ res->mapped = 0; /* be explicit */
+ if (!res->vm_area) {
+ vcm_err("NULL res->vm_area\n");
+ goto fail2;
+ }
+
+ res->dev_addr = (size_t) res->vm_area->addr +
+ (res->alignment_req -
+ ((size_t) res->vm_area->addr &
+ (res->alignment_req - 1)));
+
+ break;
+ case VCM_ONE_TO_ONE:
+ break;
+ default:
+ vcm_err("%i is an invalid vcm->type\n", vcm->type);
+ goto fail2;
+ }
+
+ list_add_tail(&res->res_elm, &vcm->res_head);
+
+ return res;
+
+fail2:
+ kfree(res);
+fail:
+ return 0;
+}
+
+
+struct res *vcm_reserve(struct vcm *vcm, size_t len, u32 attr)
+{
+ unsigned long flags;
+ struct res *res;
+
+ spin_lock_irqsave(&vcmlock, flags);
+ res = __vcm_reserve(vcm, len, attr);
+ spin_unlock_irqrestore(&vcmlock, flags);
+
+ return res;
+}
+
+
+struct res *vcm_reserve_at(enum memtarget_t memtarget, struct vcm *vcm,
+ size_t len, u32 attr)
+{
+ return 0;
+}
+
+
+static int __vcm_unreserve(struct res *res)
+{
+ struct vcm *vcm;
+
+ if (!res) {
+ vcm_err("NULL res\n");
+ goto fail;
+ }
+
+ if (!res->vcm) {
+ vcm_err("NULL res->vcm\n");
+ goto fail;
+ }
+
+ vcm = res->vcm;
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ switch (vcm->type) {
+ case VCM_DEVICE:
+ if (!res->vcm->pool) {
+ vcm_err("NULL (res->vcm))->pool\n");
+ goto fail;
+ }
+
+ /* res->ptr could be zero, this isn't an error */
+ gen_pool_free(res->vcm->pool, res->ptr,
+ res->aligned_len);
+ break;
+ case VCM_EXT_KERNEL:
+ if (res->mapped) {
+ vcm_err("res->mapped is true\n");
+ goto fail;
+ }
+
+ /* vunmap() frees res->vm_area, so once it has been called we must
+ * not call free_vm_area() as well; vcm_unback() marks that case by
+ * setting res->vm_area to 0.
+ */
+ if (res->vm_area) {
+ free_vm_area(res->vm_area);
+ res->vm_area = 0;
+ }
+
+ break;
+ case VCM_ONE_TO_ONE:
+ break;
+ default:
+ vcm_err("%i is an invalid vcm->type\n", vcm->type);
+ goto fail;
+ }
+
+ list_del(&res->res_elm);
+
+ /* be extra careful by clearing the memory before freeing it */
+ memset(res, 0, sizeof(*res));
+
+ kfree(res);
+
+ return 0;
+
+fail:
+ return -EINVAL;
+}
+
+
+int vcm_unreserve(struct res *res)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&vcmlock, flags);
+ ret = __vcm_unreserve(res);
+ spin_unlock_irqrestore(&vcmlock, flags);
+
+ return ret;
+}
+
+
+/* No lock needed, res->len is never updated after creation */
+size_t vcm_get_res_len(struct res *res)
+{
+ if (!res) {
+ vcm_err("res is 0\n");
+ return 0;
+ }
+
+ return res->len;
+}
+
+
+int vcm_set_res_attr(struct res *res, u32 attr)
+{
+ return 0;
+}
+
+
+u32 vcm_get_res_attr(struct res *res)
+{
+ return 0;
+}
+
+
+size_t vcm_get_num_res(struct vcm *vcm)
+{
+ return 0;
+}
+
+
+struct res *vcm_get_next_res(struct vcm *vcm, struct res *res)
+{
+ return 0;
+}
+
+
+size_t vcm_res_copy(struct res *to, size_t to_off, struct res *from,
+ size_t from_off, size_t len)
+{
+ return 0;
+}
+
+
+size_t vcm_get_min_page_size(void)
+{
+ return PAGE_SIZE;
+}
+
+
+static int vcm_to_smmu_attr(u32 attr)
+{
+ int smmu_attr = 0;
+
+ switch (attr & VCM_CACHE_POLICY) {
+ case VCM_NOTCACHED:
+ smmu_attr = VCM_DEV_ATTR_NONCACHED;
+ break;
+ case VCM_WB_WA:
+ smmu_attr = VCM_DEV_ATTR_CACHED_WB_WA;
+ smmu_attr |= VCM_DEV_ATTR_SH;
+ break;
+ case VCM_WB_NWA:
+ smmu_attr = VCM_DEV_ATTR_CACHED_WB_NWA;
+ smmu_attr |= VCM_DEV_ATTR_SH;
+ break;
+ case VCM_WT:
+ smmu_attr = VCM_DEV_ATTR_CACHED_WT;
+ smmu_attr |= VCM_DEV_ATTR_SH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return smmu_attr;
+}
+
+
+static int vcm_process_chunk(struct iommu_domain *domain, phys_addr_t pa,
+ unsigned long va, size_t len, u32 attr, int map)
+{
+ int ret, i, map_order;
+ unsigned long map_len = smmu_map_sizes[ARRAY_SIZE(smmu_map_sizes) - 1];
+
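+ /*
+ * Use the largest SMMU page size that both divides va and fits in
+ * len, falling back to the smallest (4K) size chosen above.
+ */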
+ for (i = 0; i < ARRAY_SIZE(smmu_map_sizes); i++) {
+ if (IS_ALIGNED(va, smmu_map_sizes[i]) && len >=
+ smmu_map_sizes[i]) {
+ map_len = smmu_map_sizes[i];
+ break;
+ }
+ }
+
+#ifdef VCM_PERF_DEBUG
+ if (va & (len - 1))
+ pr_warning("Warning! Suboptimal VCM mapping alignment "
+ "va = %p, len = %p. Expect TLB performance "
+ "degradation.\n", (void *) va, (void *) len);
+#endif
+
+ map_order = get_order(map_len);
+
+ while (len) {
+ if (va & (SZ_4K - 1)) {
+ vcm_err("Tried to map w/ align < 4k! va = %08lx\n", va);
+ goto fail;
+ }
+
+ if (map_len > len) {
+ vcm_err("map_len = %lu, len = %d, trying to overmap\n",
+ map_len, len);
+ goto fail;
+ }
+
+ if (map)
+ ret = iommu_map(domain, va, pa, map_order, attr);
+ else
+ ret = iommu_unmap(domain, va, map_order);
+
+ if (ret) {
+ vcm_err("iommu_map/unmap(%p, %p, %p, 0x%x, 0x%x) ret %i"
+ "map = %d", (void *) domain, (void *) pa,
+ (void *) va, (int) map_len, attr, ret, map);
+ goto fail;
+ }
+
+ va += map_len;
+ pa += map_len;
+ len -= map_len;
+ }
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+/* TBD if you vcm_back again what happens? */
+int vcm_back(struct res *res, struct physmem *physmem)
+{
+ unsigned long flags;
+ struct vcm *vcm;
+ struct phys_chunk *chunk;
+ size_t va = 0;
+ int ret;
+ int attr;
+
+ spin_lock_irqsave(&vcmlock, flags);
+
+ if (!res) {
+ vcm_err("NULL res\n");
+ goto fail;
+ }
+
+ vcm = res->vcm;
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ switch (vcm->type) {
+ case VCM_DEVICE:
+ case VCM_EXT_KERNEL: /* hack part 1 */
+ attr = vcm_to_smmu_attr(res->attr);
+ if (attr < 0) {
+ vcm_err("Bad SMMU attr\n");
+ goto fail;
+ }
+ break;
+ default:
+ attr = 0;
+ break;
+ }
+
+ if (!physmem) {
+ vcm_err("NULL physmem\n");
+ goto fail;
+ }
+
+ if (res->len == 0) {
+ vcm_err("res->len is 0\n");
+ goto fail;
+ }
+
+ if (physmem->len == 0) {
+ vcm_err("physmem->len is 0\n");
+ goto fail;
+ }
+
+ if (res->len != physmem->len) {
+ vcm_err("res->len (%i) != physmem->len (%i)\n",
+ res->len, physmem->len);
+ goto fail;
+ }
+
+ if (physmem->is_cont) {
+ if (physmem->res == 0) {
+ vcm_err("cont physmem->res is 0");
+ goto fail;
+ }
+ } else {
+ /* fail if no physmem */
+ if (list_empty(&physmem->alloc_head.allocated)) {
+ vcm_err("no allocated phys memory");
+ goto fail;
+ }
+ }
+
+ ret = vcm_no_assoc(res->vcm);
+ if (ret == 1) {
+ vcm_err("can't back un associated VCM\n");
+ goto fail;
+ }
+
+ if (ret < 0) {
+ vcm_err("vcm_no_assoc() ret %i\n", ret);
+ goto fail;
+ }
+
+ ret = vcm_all_activated(res->vcm);
+ if (ret == 0) {
+ vcm_err("can't back, not all associations are activated\n");
+ goto fail_eagain;
+ }
+
+ if (ret < 0) {
+ vcm_err("vcm_all_activated() ret %i\n", ret);
+ goto fail;
+ }
+
+ va = res->dev_addr;
+
+ list_for_each_entry(chunk, &physmem->alloc_head.allocated,
+ allocated) {
+ struct vcm *vcm = res->vcm;
+ size_t chunk_size = chunk->size;
+
+ if (chunk_size <= 0) {
+ vcm_err("Bad chunk size: %d\n", chunk_size);
+ goto fail;
+ }
+
+ switch (vcm->type) {
+ case VCM_DEVICE:
+ {
+ /* map all */
+ ret = vcm_process_chunk(vcm->domain, chunk->pa,
+ va, chunk_size, attr, 1);
+ if (ret != 0) {
+ vcm_err("vcm_process_chunk(%p, %p, %p,"
+ " 0x%x, 0x%x)"
+ " ret %i",
+ vcm->domain,
+ (void *) chunk->pa,
+ (void *) va,
+ (int) chunk_size, attr, ret);
+ goto fail;
+ }
+ break;
+ }
+
+ case VCM_EXT_KERNEL:
+ {
+ unsigned int pages_in_chunk = chunk_size / PAGE_SIZE;
+ unsigned long loc_va = va;
+ unsigned long loc_pa = chunk->pa;
+
+ const struct mem_type *mtype;
+
+ /* TODO: get this based on MEMTYPE */
+ mtype = get_mem_type(MT_DEVICE);
+ if (!mtype) {
+ vcm_err("mtype is 0\n");
+ goto fail;
+ }
+
+ /* TODO: Map with the same chunk size */
+ while (pages_in_chunk--) {
+ ret = ioremap_page(loc_va,
+ loc_pa,
+ mtype);
+ if (ret != 0) {
+ vcm_err("ioremap_page(%p, %p, %p) ret"
+ " %i", (void *) loc_va,
+ (void *) loc_pa,
+ (void *) mtype, ret);
+ goto fail;
+ /* TODO handle weird
+ inter-map case */
+ }
+
+ /* hack part 2 */
+ /* we're changing the PT entry behind
+ * linux's back
+ */
+ ret = cpu_set_attr(loc_va, PAGE_SIZE, attr);
+ if (ret != 0) {
+ vcm_err("cpu_set_attr(%p, %lu, %x)"
+ "ret %i\n",
+ (void *) loc_va, PAGE_SIZE,
+ attr, ret);
+ goto fail;
+ /* TODO handle weird
+ inter-map case */
+ }
+
+ res->mapped = 1;
+
+ loc_va += PAGE_SIZE;
+ loc_pa += PAGE_SIZE;
+ }
+
+ flush_cache_vmap(va, loc_va);
+ break;
+ }
+ case VCM_ONE_TO_ONE:
+ va = chunk->pa;
+ break;
+ default:
+ /* this should never happen */
+ goto fail;
+ }
+
+ va += chunk_size;
+ /* also add res to the allocated chunk list of refs */
+ }
+
+ /* note the reservation */
+ res->physmem = physmem;
+
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return 0;
+fail_eagain:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -EAGAIN;
+fail:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -EINVAL;
+}
+
+
+int vcm_unback(struct res *res)
+{
+ unsigned long flags;
+ struct vcm *vcm;
+ struct physmem *physmem;
+ int ret;
+
+ spin_lock_irqsave(&vcmlock, flags);
+
+ if (!res)
+ goto fail;
+
+ vcm = res->vcm;
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ if (!res->physmem) {
+ vcm_err("can't unback a non-backed reservation\n");
+ goto fail;
+ }
+
+ physmem = res->physmem;
+ if (!physmem) {
+ vcm_err("physmem is NULL\n");
+ goto fail;
+ }
+
+ if (list_empty(&physmem->alloc_head.allocated)) {
+ vcm_err("physmem allocation is empty\n");
+ goto fail;
+ }
+
+ ret = vcm_no_assoc(res->vcm);
+ if (ret == 1) {
+ vcm_err("can't unback a unassociated reservation\n");
+ goto fail;
+ }
+
+ if (ret == -1) {
+ vcm_err("vcm_no_assoc(%p) ret -1\n", (void *) res->vcm);
+ goto fail;
+ }
+
+ ret = vcm_all_activated(res->vcm);
+ if (ret == 0) {
+ vcm_err("can't unback, not all associations are active\n");
+ goto fail_eagain;
+ }
+
+ if (ret == -1) {
+ vcm_err("vcm_all_activated(%p) ret -1\n", (void *) res->vcm);
+ goto fail;
+ }
+
+
+ switch (vcm->type) {
+ case VCM_EXT_KERNEL:
+ if (!res->mapped) {
+ vcm_err("can't unback an unmapped VCM_EXT_KERNEL"
+ " VCM\n");
+ goto fail;
+ }
+
+ /* vunmap frees the vm_area */
+ vunmap(res->vm_area->addr);
+ res->vm_area = NULL;
+
+ res->mapped = 0;
+ break;
+
+ case VCM_DEVICE:
+ {
+ struct phys_chunk *chunk;
+ size_t va = res->dev_addr;
+
+ list_for_each_entry(chunk, &physmem->alloc_head.allocated,
+ allocated) {
+ struct vcm *vcm = res->vcm;
+ size_t chunk_size = chunk->size;
+
+ ret = vcm_process_chunk(vcm->domain, 0, va,
+ chunk_size, 0, 0);
+ if (ret != 0) {
+ vcm_err("vcm_process_chunk(%p, %p, 0x%x)"
+ " ret %i\n",
+ (void *) vcm->domain,
+ (void *) va,
+ (int) chunk_size, ret);
+ goto fail;
+ /* TODO handle weird inter-unmap state*/
+ }
+
+ va += chunk_size;
+ /* may do a light unback, depending on the requested
+ * functionality
+ */
+ }
+ break;
+ }
+
+ case VCM_ONE_TO_ONE:
+ break;
+ default:
+ /* this should never happen */
+ goto fail;
+ }
+
+ /* clear the reservation */
+ res->physmem = 0;
+
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return 0;
+fail_eagain:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -EAGAIN;
+fail:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -EINVAL;
+}
+
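+/*
+ * Illustrative usage sketch for the two functions above: backing a
+ * reservation with physical memory and tearing it down again.  It assumes
+ * the caller already holds a 1MB reservation @res on a fully activated
+ * VCM, obtained through the reservation API declared in <linux/vcm.h>.
+ *
+ *	struct physmem *pm;
+ *	int ret;
+ *
+ *	pm = vcm_phys_alloc(VCM_MEMTYPE_0, SZ_1M, 0);
+ *	if (!pm)
+ *		return -ENOMEM;
+ *
+ *	ret = vcm_back(res, pm);
+ *	if (ret) {
+ *		vcm_phys_free(pm);
+ *		return ret;
+ *	}
+ *
+ *	(the device can now use the mapping at res->dev_addr)
+ *
+ *	vcm_unback(res);
+ *	vcm_phys_free(pm);
+ */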
+
+enum memtarget_t vcm_get_memtype_of_res(struct res *res)
+{
+ return VCM_INVALID;
+}
+
+static int vcm_free_max_munch_cont(struct phys_chunk *head)
+{
+ struct phys_chunk *chunk, *tmp;
+
+ if (!head)
+ return -EINVAL;
+
+ list_for_each_entry_safe(chunk, tmp, &head->allocated,
+ allocated) {
+ list_del_init(&chunk->allocated);
+ }
+
+ return 0;
+}
+
+static int vcm_alloc_max_munch_cont(size_t start_addr, size_t len,
+ struct phys_chunk *head)
+{
+ /* this function should always succeed, since it
+ parallels a VCM */
+
+ int i, j;
+
+ if (!head) {
+ vcm_err("head is NULL in continuous map.\n");
+ goto fail;
+ }
+
+ if (start_addr < (size_t) bootmem_cont) {
+ vcm_err("phys start addr (%p) < base (%p)\n",
+ (void *) start_addr, (void *) bootmem_cont);
+ goto fail;
+ }
+
+ if ((start_addr + len) >= ((size_t) bootmem_cont + cont_sz)) {
+ vcm_err("requested region (%p + %i) >"
+ " available region (%p + %i)\n",
+ (void *) start_addr, (int) len,
+ (void *) bootmem_cont, cont_sz);
+ goto fail;
+ }
+
+ i = (start_addr - (size_t) bootmem_cont)/SZ_4K;
+
+ for (j = 0; j < ARRAY_SIZE(smmu_map_sizes); ++j) {
+ while (len/smmu_map_sizes[j]) {
+ if (!list_empty(&cont_phys_chunk[i].allocated)) {
+ vcm_err("chunk %i ( addr %p) already mapped\n",
+ i, (void *) (start_addr +
+ (i*smmu_map_sizes[j])));
+ goto fail_free;
+ }
+ list_add_tail(&cont_phys_chunk[i].allocated,
+ &head->allocated);
+ cont_phys_chunk[i].size = smmu_map_sizes[j];
+
+ len -= smmu_map_sizes[j];
+ i += smmu_map_sizes[j]/SZ_4K;
+ }
+ }
+
+ if (len % SZ_4K) {
+ if (!list_empty(&cont_phys_chunk[i].allocated)) {
+ vcm_err("chunk %i (addr %p) already mapped\n",
+ i, (void *) (start_addr + (i*SZ_4K)));
+ goto fail_free;
+ }
+ len -= SZ_4K;
+ list_add_tail(&cont_phys_chunk[i].allocated,
+ &head->allocated);
+
+ i++;
+ }
+
+ return i;
+
+fail_free:
+ {
+ struct phys_chunk *chunk, *tmp;
+ /* only unlink from the list; on a double allocation we
+ must not clobber chunks that belong to someone else */
+ list_for_each_entry_safe(chunk, tmp, &head->allocated,
+ allocated) {
+ list_del(&chunk->allocated);
+ }
+ }
+fail:
+ return 0;
+}
+
+struct physmem *vcm_phys_alloc(enum memtype_t memtype, size_t len, u32 attr)
+{
+ unsigned long flags;
+ int ret;
+ struct physmem *physmem = NULL;
+ int blocks_allocated;
+
+ spin_lock_irqsave(&vcmlock, flags);
+
+ /* vcmlock is held with IRQs off, so this allocation must not sleep */
+ physmem = kzalloc(sizeof(*physmem), GFP_ATOMIC);
+ if (!physmem) {
+ vcm_err("could not allocate physmem\n");
+ goto fail;
+ }
+
+ physmem->memtype = memtype;
+ physmem->len = len;
+ physmem->attr = attr;
+
+ INIT_LIST_HEAD(&physmem->alloc_head.allocated);
+
+ if (attr & VCM_PHYS_CONT) {
+ if (!cont_vcm_id) {
+ vcm_err("cont_vcm_id is NULL\n");
+ goto fail2;
+ }
+
+ physmem->is_cont = 1;
+
+ /* TODO: get attributes */
+ physmem->res = __vcm_reserve(cont_vcm_id, len, 0);
+ if (physmem->res == 0) {
+ vcm_err("contiguous space allocation failed\n");
+ goto fail2;
+ }
+
+ /* if we're here we know we have memory, create
+ the shadow physmem links*/
+ blocks_allocated =
+ vcm_alloc_max_munch_cont(
+ physmem->res->dev_addr,
+ len,
+ &physmem->alloc_head);
+
+ if (blocks_allocated == 0) {
+ vcm_err("shadow physmem allocation failed\n");
+ goto fail3;
+ }
+ } else {
+ blocks_allocated = vcm_alloc_max_munch(len, memtype,
+ &physmem->alloc_head);
+ if (blocks_allocated == 0) {
+ vcm_err("physical allocation failed:"
+ " vcm_alloc_max_munch(%i, %p) ret 0\n",
+ len, &physmem->alloc_head);
+ goto fail2;
+ }
+ }
+
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return physmem;
+
+fail3:
+ ret = __vcm_unreserve(physmem->res);
+ if (ret != 0) {
+ vcm_err("vcm_unreserve(%p) ret %i during cleanup",
+ (void *) physmem->res, ret);
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return 0;
+ }
+fail2:
+ kfree(physmem);
+fail:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return 0;
+}
+
+
+int vcm_phys_free(struct physmem *physmem)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&vcmlock, flags);
+
+ if (!physmem) {
+ vcm_err("physmem is NULL\n");
+ goto fail;
+ }
+
+ if (physmem->is_cont) {
+ if (physmem->res == 0) {
+ vcm_err("contiguous reservation is NULL\n");
+ goto fail;
+ }
+
+ ret = vcm_free_max_munch_cont(&physmem->alloc_head);
+ if (ret != 0) {
+ vcm_err("failed to free physical blocks:"
+ " vcm_free_max_munch_cont(%p) ret %i\n",
+ (void *) &physmem->alloc_head, ret);
+ goto fail;
+ }
+
+ ret = __vcm_unreserve(physmem->res);
+ if (ret != 0) {
+ vcm_err("failed to free virtual blocks:"
+ " vcm_unreserve(%p) ret %i\n",
+ (void *) physmem->res, ret);
+ goto fail;
+ }
+
+ } else {
+
+ ret = vcm_alloc_free_blocks(physmem->memtype,
+ &physmem->alloc_head);
+ if (ret != 0) {
+ vcm_err("failed to free physical blocks:"
+ " vcm_alloc_free_blocks(%p) ret %i\n",
+ (void *) &physmem->alloc_head, ret);
+ goto fail;
+ }
+ }
+
+ memset(physmem, 0, sizeof(*physmem));
+
+ kfree(physmem);
+
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return 0;
+
+fail:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -EINVAL;
+}
+
+
+struct avcm *vcm_assoc(struct vcm *vcm, struct device *dev, u32 attr)
+{
+ unsigned long flags;
+ struct avcm *avcm = NULL;
+
+ spin_lock_irqsave(&vcmlock, flags);
+
+ if (!vcm) {
+ vcm_err("vcm is NULL\n");
+ goto fail;
+ }
+
+ if (!dev) {
+ vcm_err("dev_id is NULL\n");
+ goto fail;
+ }
+
+ if (vcm->type == VCM_EXT_KERNEL && !list_empty(&vcm->assoc_head)) {
+ vcm_err("only one device may be assocoated with a"
+ " VCM_EXT_KERNEL\n");
+ goto fail;
+ }
+
+ /* vcmlock is held with IRQs off, so this allocation must not sleep */
+ avcm = kzalloc(sizeof(*avcm), GFP_ATOMIC);
+ if (!avcm) {
+ vcm_err("kzalloc(%zu, GFP_ATOMIC) ret NULL\n", sizeof(*avcm));
+ goto fail;
+ }
+
+ avcm->dev = dev;
+
+ avcm->vcm = vcm;
+ avcm->attr = attr;
+ avcm->is_active = 0;
+
+ INIT_LIST_HEAD(&avcm->assoc_elm);
+ list_add(&avcm->assoc_elm, &vcm->assoc_head);
+
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return avcm;
+
+fail:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return 0;
+}
+
+
+int vcm_deassoc(struct avcm *avcm)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vcmlock, flags);
+
+ if (!avcm) {
+ vcm_err("avcm is NULL\n");
+ goto fail;
+ }
+
+ if (list_empty(&avcm->assoc_elm)) {
+ vcm_err("nothing to deassociate\n");
+ goto fail;
+ }
+
+ if (avcm->is_active) {
+ vcm_err("association still activated\n");
+ goto fail_busy;
+ }
+
+ list_del(&avcm->assoc_elm);
+
+ memset(avcm, 0, sizeof(*avcm));
+
+ kfree(avcm);
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return 0;
+fail_busy:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -EBUSY;
+fail:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -EINVAL;
+}
+
+
+int vcm_set_assoc_attr(struct avcm *avcm, u32 attr)
+{
+ return 0;
+}
+
+
+u32 vcm_get_assoc_attr(struct avcm *avcm)
+{
+ return 0;
+}
+
+
+int vcm_activate(struct avcm *avcm)
+{
+ unsigned long flags;
+ struct vcm *vcm;
+
+ spin_lock_irqsave(&vcmlock, flags);
+
+ if (!avcm) {
+ vcm_err("avcm is NULL\n");
+ goto fail;
+ }
+
+ vcm = avcm->vcm;
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ if (!avcm->dev) {
+ vcm_err("cannot activate without a device\n");
+ goto fail_nodev;
+ }
+
+ if (avcm->is_active) {
+ vcm_err("double activate\n");
+ goto fail_busy;
+ }
+
+ if (vcm->type == VCM_DEVICE) {
+#ifdef CONFIG_SMMU
+ int ret;
+ ret = iommu_attach_device(vcm->domain, avcm->dev);
+ if (ret != 0) {
+ dev_err(avcm->dev, "failed to attach to domain\n");
+ goto fail_dev;
+ }
+#else
+ vcm_err("No SMMU support - cannot activate/deactivate\n");
+ goto fail_nodev;
+#endif
+ }
+
+ avcm->is_active = 1;
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return 0;
+
+#ifdef CONFIG_SMMU
+fail_dev:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -ENODEV;
+#endif
+fail_busy:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -EBUSY;
+fail_nodev:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -ENODEV;
+fail:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -EINVAL;
+}
+
+
+int vcm_deactivate(struct avcm *avcm)
+{
+ unsigned long flags;
+ struct vcm *vcm;
+
+ spin_lock_irqsave(&vcmlock, flags);
+
+ if (!avcm)
+ goto fail;
+
+ vcm = avcm->vcm;
+ if (!vcm) {
+ vcm_err("NULL vcm\n");
+ goto fail;
+ }
+
+ if (!avcm->dev) {
+ vcm_err("cannot deactivate without a device\n");
+ goto fail;
+ }
+
+ if (!avcm->is_active) {
+ vcm_err("double deactivate\n");
+ goto fail_nobusy;
+ }
+
+ if (vcm->type == VCM_DEVICE) {
+#ifdef CONFIG_SMMU
+ /* TODO, pmem check */
+ iommu_detach_device(vcm->domain, avcm->dev);
+#else
+ vcm_err("No SMMU support - cannot activate/deactivate\n");
+ goto fail;
+#endif
+ }
+
+ avcm->is_active = 0;
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return 0;
+fail_nobusy:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -ENOENT;
+fail:
+ spin_unlock_irqrestore(&vcmlock, flags);
+ return -EINVAL;
+}
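+
+/*
+ * Illustrative association lifecycle, a sketch assuming @vcm is a
+ * VCM_DEVICE-type VCM already created through the VCM API and @dev is the
+ * device sitting behind the SMMU:
+ *
+ *	struct avcm *avcm;
+ *
+ *	avcm = vcm_assoc(vcm, dev, 0);
+ *	if (!avcm)
+ *		return -EINVAL;
+ *
+ *	if (vcm_activate(avcm)) {
+ *		vcm_deassoc(avcm);
+ *		return -ENODEV;
+ *	}
+ *
+ *	(reservations on vcm may now be backed with vcm_back())
+ *
+ *	vcm_deactivate(avcm);
+ *	vcm_deassoc(avcm);
+ */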
+
+struct bound *vcm_create_bound(struct vcm *vcm, size_t len)
+{
+ return 0;
+}
+
+
+int vcm_free_bound(struct bound *bound)
+{
+ return -EINVAL;
+}
+
+
+struct res *vcm_reserve_from_bound(struct bound *bound, size_t len,
+ u32 attr)
+{
+ return 0;
+}
+
+
+size_t vcm_get_bound_start_addr(struct bound *bound)
+{
+ return 0;
+}
+
+
+size_t vcm_get_bound_len(struct bound *bound)
+{
+ return 0;
+}
+
+
+struct physmem *vcm_map_phys_addr(phys_addr_t phys, size_t len)
+{
+ return 0;
+}
+
+
+size_t vcm_get_next_phys_addr(struct physmem *physmem, phys_addr_t phys,
+ size_t *len)
+{
+ return 0;
+}
+
+
+struct res *vcm_get_res(unsigned long dev_addr, struct vcm *vcm)
+{
+ return 0;
+}
+
+
+size_t vcm_translate(struct device *src_dev, struct vcm *src_vcm,
+ struct vcm *dst_vcm)
+{
+ return 0;
+}
+
+
+size_t vcm_get_phys_num_res(phys_addr_t phys)
+{
+ return 0;
+}
+
+
+struct res *vcm_get_next_phys_res(phys_addr_t phys, struct res *res,
+ size_t *len)
+{
+ return 0;
+}
+
+
+phys_addr_t vcm_get_pgtbl_pa(struct vcm *vcm)
+{
+ return 0;
+}
+
+
+/* No lock needed, smmu_translate has its own lock */
+phys_addr_t vcm_dev_addr_to_phys_addr(struct vcm *vcm, unsigned long dev_addr)
+{
+ if (!vcm)
+ return -EINVAL;
+#ifdef CONFIG_SMMU
+ return iommu_iova_to_phys(vcm->domain, dev_addr);
+#else
+ vcm_err("No support for SMMU - manual translation not supported\n");
+ return -ENODEV;
+#endif
+}
+
+
+/* No lock needed, bootmem_cont never changes after */
+phys_addr_t vcm_get_cont_memtype_pa(enum memtype_t memtype)
+{
+ if (memtype != VCM_MEMTYPE_0) {
+ vcm_err("memtype != VCM_MEMTYPE_0\n");
+ goto fail;
+ }
+
+ if (!bootmem_cont) {
+ vcm_err("bootmem_cont 0\n");
+ goto fail;
+ }
+
+ return (size_t) bootmem_cont;
+fail:
+ return 0;
+}
+
+
+/* No lock needed, constant */
+size_t vcm_get_cont_memtype_len(enum memtype_t memtype)
+{
+ if (memtype != VCM_MEMTYPE_0) {
+ vcm_err("memtype != VCM_MEMTYPE_0\n");
+ return 0;
+ }
+
+ return cont_sz;
+}
+
+int vcm_hook(struct device *dev, vcm_handler handler, void *data)
+{
+#ifdef CONFIG_SMMU
+ vcm_err("No interrupts in IOMMU API\n");
+ return -ENODEV;
+#else
+ vcm_err("No support for SMMU - interrupts not supported\n");
+ return -ENODEV;
+#endif
+}
+
+
+size_t vcm_hw_ver(size_t dev)
+{
+ return 0;
+}
+
+
+static int vcm_cont_phys_chunk_init(void)
+{
+ int i;
+ unsigned long cont_pa;
+
+ if (!cont_phys_chunk) {
+ vcm_err("cont_phys_chunk 0\n");
+ goto fail;
+ }
+
+ if (!bootmem_cont) {
+ vcm_err("bootmem_cont 0\n");
+ goto fail;
+ }
+
+ cont_pa = (size_t) bootmem_cont;
+
+ for (i = 0; i < cont_sz/PAGE_SIZE; ++i) {
+ cont_phys_chunk[i].pa = cont_pa;
+ cont_pa += PAGE_SIZE;
+ cont_phys_chunk[i].size = SZ_4K;
+ /* Not part of an allocator-managed pool */
+ cont_phys_chunk[i].pool_idx = -1;
+ INIT_LIST_HEAD(&cont_phys_chunk[i].allocated);
+ }
+
+ return 0;
+
+fail:
+ return -EINVAL;
+}
+
+int vcm_sys_init(struct physmem_region *mem, int n_regions,
+ struct vcm_memtype_map *mt_map, int n_mt,
+ void *cont_pa, unsigned int cont_len)
+{
+ int ret;
+ printk(KERN_INFO "VCM Initialization\n");
+ bootmem_cont = cont_pa;
+ cont_sz = cont_len;
+
+ if (!bootmem_cont) {
+ vcm_err("bootmem_cont is 0\n");
+ ret = -1;
+ goto fail;
+ }
+
+ ret = vcm_setup_tex_classes();
+ if (ret != 0) {
+ printk(KERN_INFO "Could not determine TEX attribute mapping\n");
+ ret = -1;
+ goto fail;
+ }
+
+
+ ret = vcm_alloc_init(mem, n_regions, mt_map, n_mt);
+
+ if (ret != 0) {
+ vcm_err("vcm_alloc_init() ret %i\n", ret);
+ ret = -1;
+ goto fail;
+ }
+
+ cont_phys_chunk = kzalloc(sizeof(*cont_phys_chunk)*(cont_sz/PAGE_SIZE),
+ GFP_KERNEL);
+ if (!cont_phys_chunk) {
+ vcm_err("kzalloc(%lu, GFP_KERNEL) ret 0",
+ sizeof(*cont_phys_chunk)*(cont_sz/PAGE_SIZE));
+ goto fail_free;
+ }
+
+ /* the address and size will hit our special case unless we
+ pass an override */
+ cont_vcm_id = vcm_create_flagged(0, (size_t)bootmem_cont, cont_sz);
+ if (cont_vcm_id == 0) {
+ vcm_err("vcm_create_flagged(0, %p, %i) ret 0\n",
+ bootmem_cont, cont_sz);
+ ret = -1;
+ goto fail_free2;
+ }
+
+ ret = vcm_cont_phys_chunk_init();
+ if (ret != 0) {
+ vcm_err("vcm_cont_phys_chunk_init() ret %i\n", ret);
+ goto fail_free3;
+ }
+
+ printk(KERN_INFO "VCM Initialization OK\n");
+ return 0;
+
+fail_free3:
+ ret = __vcm_free(cont_vcm_id);
+ if (ret != 0) {
+ vcm_err("vcm_free(%p) ret %i during failure path\n",
+ (void *) cont_vcm_id, ret);
+ return ret;
+ }
+
+fail_free2:
+ kfree(cont_phys_chunk);
+ cont_phys_chunk = 0;
+
+fail_free:
+ ret = vcm_alloc_destroy();
+ if (ret != 0)
+ vcm_err("vcm_alloc_destroy() ret %i during failure path\n",
+ ret);
+
+ ret = -EINVAL;
+fail:
+ return ret;
+}
+
+
+int vcm_sys_destroy(void)
+{
+ int ret = 0;
+
+ if (!cont_phys_chunk) {
+ vcm_err("cont_phys_chunk is 0\n");
+ return -ENODEV;
+ }
+
+ if (!cont_vcm_id) {
+ vcm_err("cont_vcm_id is 0\n");
+ return -ENODEV;
+ }
+
+ ret = __vcm_free(cont_vcm_id);
+ if (ret != 0) {
+ vcm_err("vcm_free(%p) ret %i\n", (void *) cont_vcm_id, ret);
+ return -ENODEV;
+ }
+
+ cont_vcm_id = 0;
+
+ kfree(cont_phys_chunk);
+ cont_phys_chunk = 0;
+
+ ret = vcm_alloc_destroy();
+ if (ret != 0) {
+ vcm_err("vcm_alloc_destroy() ret %i\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
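+
+/*
+ * Illustrative setup sketch for vcm_sys_init().  The region and memtype
+ * tables normally come from board code; the values below (one 16MB pool of
+ * 1MB chunks feeding memtype 0) and the cont_base/cont_size names for the
+ * contiguous carve-out are placeholders only.
+ *
+ *	static struct physmem_region regions[] = {
+ *		{ .addr = 0x40000000, .size = SZ_16M, .chunk_size = SZ_1M },
+ *	};
+ *
+ *	static struct vcm_memtype_map mt_map[] = {
+ *		[VCM_MEMTYPE_0] = { .pool_id = { 0 }, .num_pools = 1 },
+ *	};
+ *
+ *	ret = vcm_sys_init(regions, ARRAY_SIZE(regions),
+ *			   mt_map, ARRAY_SIZE(mt_map),
+ *			   cont_base, cont_size);
+ */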
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zach Pfeffer <zpfeffer@codeaurora.org>");
diff --git a/arch/arm/mm/vcm_alloc.c b/arch/arm/mm/vcm_alloc.c
new file mode 100644
index 0000000..5f3c024
--- /dev/null
+++ b/arch/arm/mm/vcm_alloc.c
@@ -0,0 +1,557 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/vcm.h>
+#include <linux/vcm_alloc.h>
+#include <linux/string.h>
+#include <asm/sizes.h>
+
+int basicalloc_init;
+
+#define vcm_alloc_err(a, ...) \
+ pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__)
+
+struct phys_chunk_head {
+ struct list_head head;
+ int num;
+};
+
+struct phys_pool {
+ int size;
+ int chunk_size;
+ struct phys_chunk_head head;
+};
+
+static int vcm_num_phys_pools;
+static int vcm_num_memtypes;
+static struct phys_pool *vcm_phys_pool;
+static struct vcm_memtype_map *memtype_map;
+
+static int num_pools(enum memtype_t memtype)
+{
+ if (memtype >= vcm_num_memtypes) {
+ vcm_alloc_err("Bad memtype: %d\n", memtype);
+ return -EINVAL;
+ }
+ return memtype_map[memtype].num_pools;
+}
+
+static int pool_chunk_size(enum memtype_t memtype, int prio_idx)
+{
+ int pool_idx;
+ if (memtype >= vcm_num_memtypes) {
+ vcm_alloc_err("Bad memtype: %d\n", memtype);
+ return -EINVAL;
+ }
+
+ if (prio_idx >= num_pools(memtype)) {
+ vcm_alloc_err("Bad prio index: %d, max=%d, mt=%d\n", prio_idx,
+ num_pools(memtype), memtype);
+ return -EINVAL;
+ }
+
+ pool_idx = memtype_map[memtype].pool_id[prio_idx];
+ return vcm_phys_pool[pool_idx].chunk_size;
+}
+
+int vcm_alloc_pool_idx_to_size(int pool_idx)
+{
+ if (pool_idx >= vcm_num_phys_pools) {
+ vcm_alloc_err("Bad pool index: %d\n, max=%d\n", pool_idx,
+ vcm_num_phys_pools);
+ return -EINVAL;
+ }
+ return vcm_phys_pool[pool_idx].chunk_size;
+}
+
+static struct phys_chunk_head *get_chunk_list(enum memtype_t memtype,
+ int prio_idx)
+{
+ unsigned int pool_idx;
+
+ if (memtype >= vcm_num_memtypes) {
+ vcm_alloc_err("Bad memtype: %d\n", memtype);
+ return NULL;
+ }
+
+ if (prio_idx >= num_pools(memtype)) {
+ vcm_alloc_err("bad chunk size: mt=%d, prioidx=%d, np=%d\n",
+ memtype, prio_idx, num_pools(memtype));
+ BUG();
+ return NULL;
+ }
+
+ if (!vcm_phys_pool) {
+ vcm_alloc_err("phys_pool is null\n");
+ return NULL;
+ }
+
+ /* pool_id is not bounds-checked against a separate pool count;
+ * the mapping table comes straight from board-file data.
+ */
+ pool_idx = memtype_map[memtype].pool_id[prio_idx];
+
+ return &vcm_phys_pool[pool_idx].head;
+}
+
+static int is_allocated(struct list_head *allocated)
+{
+ /* This should not happen under normal conditions */
+ if (!allocated) {
+ vcm_alloc_err("no allocated\n");
+ return 0;
+ }
+
+ if (!basicalloc_init) {
+ vcm_alloc_err("no basicalloc_init\n");
+ return 0;
+ }
+ return !list_empty(allocated);
+}
+
+static int count_allocated_size(enum memtype_t memtype, int idx)
+{
+ int cnt = 0;
+ struct phys_chunk *chunk, *tmp;
+ struct phys_chunk_head *pch;
+
+ if (!basicalloc_init) {
+ vcm_alloc_err("no basicalloc_init\n");
+ return 0;
+ }
+
+ pch = get_chunk_list(memtype, idx);
+ if (!pch) {
+ vcm_alloc_err("null pch\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(chunk, tmp, &pch->head, list) {
+ if (is_allocated(&chunk->allocated))
+ cnt++;
+ }
+
+ return cnt;
+}
+
+
+int vcm_alloc_get_mem_size(void)
+{
+ if (!vcm_phys_pool) {
+ vcm_alloc_err("No physical pool set up!\n");
+ return -ENODEV;
+ }
+ return vcm_phys_pool[0].size;
+}
+EXPORT_SYMBOL(vcm_alloc_get_mem_size);
+
+void vcm_alloc_print_list(enum memtype_t memtype, int just_allocated)
+{
+ int i;
+ struct phys_chunk *chunk, *tmp;
+ struct phys_chunk_head *pch;
+
+ if (!basicalloc_init) {
+ vcm_alloc_err("no basicalloc_init\n");
+ return;
+ }
+
+ for (i = 0; i < num_pools(memtype); ++i) {
+ pch = get_chunk_list(memtype, i);
+
+ if (!pch) {
+ vcm_alloc_err("pch is null\n");
+ return;
+ }
+
+ if (list_empty(&pch->head))
+ continue;
+
+ list_for_each_entry_safe(chunk, tmp, &pch->head, list) {
+ if (just_allocated && !is_allocated(&chunk->allocated))
+ continue;
+
+ printk(KERN_INFO "pa = %#x, size = %#x\n",
+ chunk->pa, vcm_phys_pool[chunk->pool_idx].chunk_size);
+ }
+ }
+}
+EXPORT_SYMBOL(vcm_alloc_print_list);
+
+int vcm_alloc_blocks_avail(enum memtype_t memtype, int idx)
+{
+ struct phys_chunk_head *pch;
+ if (!basicalloc_init) {
+ vcm_alloc_err("no basicalloc_init\n");
+ return 0;
+ }
+ pch = get_chunk_list(memtype, idx);
+
+ if (!pch) {
+ vcm_alloc_err("pch is null\n");
+ return 0;
+ }
+ return pch->num;
+}
+EXPORT_SYMBOL(vcm_alloc_blocks_avail);
+
+
+int vcm_alloc_get_num_chunks(enum memtype_t memtype)
+{
+ return num_pools(memtype);
+}
+EXPORT_SYMBOL(vcm_alloc_get_num_chunks);
+
+
+int vcm_alloc_all_blocks_avail(enum memtarget_t memtype)
+{
+ int i;
+ int cnt = 0;
+
+ if (!basicalloc_init) {
+ vcm_alloc_err("no basicalloc_init\n");
+ return 0;
+ }
+
+ for (i = 0; i < num_pools(memtype); ++i)
+ cnt += vcm_alloc_blocks_avail(memtype, i);
+ return cnt;
+}
+EXPORT_SYMBOL(vcm_alloc_all_blocks_avail);
+
+
+int vcm_alloc_count_allocated(enum memtype_t memtype)
+{
+ int i;
+ int cnt = 0;
+
+ if (!basicalloc_init) {
+ vcm_alloc_err("no basicalloc_init\n");
+ return 0;
+ }
+
+ for (i = 0; i < num_pools(memtype); ++i)
+ cnt += count_allocated_size(memtype, i);
+ return cnt;
+}
+EXPORT_SYMBOL(vcm_alloc_count_allocated);
+
+int vcm_alloc_destroy(void)
+{
+ int i, mt;
+ struct phys_chunk *chunk, *tmp;
+
+ if (!basicalloc_init) {
+ vcm_alloc_err("no basicalloc_init\n");
+ return -ENODEV;
+ }
+
+ /* can't destroy a space that has allocations */
+ for (mt = 0; mt < vcm_num_memtypes; mt++)
+ if (vcm_alloc_count_allocated(mt)) {
+ vcm_alloc_err("allocations still present\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < vcm_num_phys_pools; i++) {
+ struct phys_chunk_head *pch = &vcm_phys_pool[i].head;
+
+ if (list_empty(&pch->head))
+ continue;
+ list_for_each_entry_safe(chunk, tmp, &pch->head, list) {
+ list_del(&chunk->list);
+ memset(chunk, 0, sizeof(*chunk));
+ kfree(chunk);
+ }
+ vcm_phys_pool[i].head.num = 0;
+ }
+
+ kfree(vcm_phys_pool);
+ kfree(memtype_map);
+
+ vcm_phys_pool = NULL;
+ memtype_map = NULL;
+ basicalloc_init = 0;
+ vcm_num_phys_pools = 0;
+ return 0;
+}
+EXPORT_SYMBOL(vcm_alloc_destroy);
+
+
+int vcm_alloc_init(struct physmem_region *mem, int n_regions,
+ struct vcm_memtype_map *mt_map, int n_mt)
+{
+ int i = 0, j = 0, r = 0, num_chunks;
+ struct phys_chunk *chunk;
+ struct phys_chunk_head *pch = NULL;
+ unsigned long pa;
+
+ /* no double inits */
+ if (basicalloc_init) {
+ vcm_alloc_err("double basicalloc_init\n");
+ BUG();
+ goto fail;
+ }
+ memtype_map = kzalloc(sizeof(*mt_map) * n_mt, GFP_KERNEL);
+ if (!memtype_map) {
+ vcm_alloc_err("Could not copy memtype map\n");
+ goto fail;
+ }
+ memcpy(memtype_map, mt_map, sizeof(*mt_map) * n_mt);
+
+ vcm_phys_pool = kzalloc(sizeof(*vcm_phys_pool) * n_regions, GFP_KERNEL);
+ vcm_num_phys_pools = n_regions;
+ vcm_num_memtypes = n_mt;
+
+ if (!vcm_phys_pool) {
+ vcm_alloc_err("Could not allocate physical pool structure\n");
+ goto fail;
+ }
+
+ /* separate out to ensure good cleanup */
+ for (i = 0; i < n_regions; i++) {
+ pch = &vcm_phys_pool[i].head;
+ INIT_LIST_HEAD(&pch->head);
+ pch->num = 0;
+ }
+
+ for (r = 0; r < n_regions; r++) {
+ pa = mem[r].addr;
+ vcm_phys_pool[r].size = mem[r].size;
+ vcm_phys_pool[r].chunk_size = mem[r].chunk_size;
+ pch = &vcm_phys_pool[r].head;
+
+ num_chunks = mem[r].size / mem[r].chunk_size;
+
+ printk(KERN_INFO "VCM Init: region %d, chunk size=%d, "
+ "num=%d, pa=%p\n", r, mem[r].chunk_size, num_chunks,
+ (void *)pa);
+
+ for (j = 0; j < num_chunks; ++j) {
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk) {
+ vcm_alloc_err("null chunk\n");
+ goto fail;
+ }
+ chunk->pa = pa;
+ chunk->size = mem[r].chunk_size;
+ pa += mem[r].chunk_size;
+ chunk->pool_idx = r;
+ INIT_LIST_HEAD(&chunk->allocated);
+ list_add_tail(&chunk->list, &pch->head);
+ pch->num++;
+ }
+ }
+
+ basicalloc_init = 1;
+ return 0;
+fail:
+ vcm_alloc_destroy();
+ return -EINVAL;
+}
+EXPORT_SYMBOL(vcm_alloc_init);
+
+
+int vcm_alloc_free_blocks(enum memtype_t memtype, struct phys_chunk *alloc_head)
+{
+ struct phys_chunk *chunk, *tmp;
+ struct phys_chunk_head *pch = NULL;
+
+ if (!basicalloc_init) {
+ vcm_alloc_err("no basicalloc_init\n");
+ goto fail;
+ }
+
+ if (!alloc_head) {
+ vcm_alloc_err("no alloc_head\n");
+ goto fail;
+ }
+
+ list_for_each_entry_safe(chunk, tmp, &alloc_head->allocated,
+ allocated) {
+ list_del_init(&chunk->allocated);
+ pch = &vcm_phys_pool[chunk->pool_idx].head;
+
+ if (!pch) {
+ vcm_alloc_err("null pch\n");
+ goto fail;
+ }
+ pch->num++;
+ }
+
+ return 0;
+fail:
+ return -ENODEV;
+}
+EXPORT_SYMBOL(vcm_alloc_free_blocks);
+
+
+int vcm_alloc_num_blocks(int num, enum memtype_t memtype, int idx,
+ struct phys_chunk *alloc_head)
+{
+ struct phys_chunk *chunk;
+ struct phys_chunk_head *pch = NULL;
+ int num_allocated = 0;
+
+ if (!basicalloc_init) {
+ vcm_alloc_err("no basicalloc_init\n");
+ goto fail;
+ }
+
+ if (!alloc_head) {
+ vcm_alloc_err("no alloc_head\n");
+ goto fail;
+ }
+
+ pch = get_chunk_list(memtype, idx);
+
+ if (!pch) {
+ vcm_alloc_err("null pch\n");
+ goto fail;
+ }
+ if (list_empty(&pch->head)) {
+ vcm_alloc_err("list is empty\n");
+ goto fail;
+ }
+
+ if (vcm_alloc_blocks_avail(memtype, idx) < num) {
+ vcm_alloc_err("not enough blocks? num=%d\n", num);
+ goto fail;
+ }
+
+ list_for_each_entry(chunk, &pch->head, list) {
+ if (num_allocated == num)
+ break;
+ if (is_allocated(&chunk->allocated))
+ continue;
+
+ list_add_tail(&chunk->allocated, &alloc_head->allocated);
+ pch->num--;
+ num_allocated++;
+ }
+ return num_allocated;
+fail:
+ return 0;
+}
+EXPORT_SYMBOL(vcm_alloc_num_blocks);
+
+
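+/*
+ * Worked example, assuming a memtype backed by three pools with chunk
+ * sizes 1M, 64K and 4K (highest priority first):
+ *
+ * len = 0x141000 (1M + 4*64K + 4K)
+ *   pool 0 (1M):  1 block,  residual 0x41000
+ *   pool 1 (64K): 4 blocks, residual 0x1000
+ *   pool 2 (4K):  1 block,  residual 0
+ * => 6 blocks placed on alloc_head, return value 6.
+ *
+ * If a pool runs short, the unsatisfied bytes are pushed down to the next
+ * (smaller) pool; any residual smaller than the last pool's chunk size is
+ * rounded up to one extra block of that size.
+ */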
+int vcm_alloc_max_munch(int len, enum memtype_t memtype,
+ struct phys_chunk *alloc_head)
+{
+ int i;
+
+ int blocks_req = 0;
+ int block_residual = 0;
+ int blocks_allocated = 0;
+ int cur_chunk_size = 0;
+ int ba = 0;
+
+ if (!basicalloc_init) {
+ vcm_alloc_err("basicalloc_init is 0\n");
+ goto fail;
+ }
+
+ if (!alloc_head) {
+ vcm_alloc_err("alloc_head is NULL\n");
+ goto fail;
+ }
+
+ if (num_pools(memtype) <= 0) {
+ vcm_alloc_err("Memtype %d has improper mempool configuration\n",
+ memtype);
+ goto fail;
+ }
+
+ for (i = 0; i < num_pools(memtype); ++i) {
+ cur_chunk_size = pool_chunk_size(memtype, i);
+ if (cur_chunk_size <= 0) {
+ vcm_alloc_err("Bad chunk size: %d\n", cur_chunk_size);
+ goto fail;
+ }
+
+ blocks_req = len / cur_chunk_size;
+ block_residual = len % cur_chunk_size;
+
+ len = block_residual; /* len left */
+ if (blocks_req) {
+ int blocks_available = 0;
+ int blocks_diff = 0;
+ int bytes_diff = 0;
+
+ blocks_available = vcm_alloc_blocks_avail(memtype, i);
+ if (blocks_available < blocks_req) {
+ blocks_diff =
+ (blocks_req - blocks_available);
+ bytes_diff =
+ blocks_diff * cur_chunk_size;
+
+ /* add back in the rest */
+ len += bytes_diff;
+ } else {
+ /* got all the blocks I need */
+ blocks_available =
+ (blocks_available > blocks_req)
+ ? blocks_req : blocks_available;
+ }
+
+ ba = vcm_alloc_num_blocks(blocks_available, memtype, i,
+ alloc_head);
+
+ if (ba != blocks_available) {
+ vcm_alloc_err("blocks allocated (%i) !="
+ " blocks_available (%i):"
+ " chunk size = %#x,"
+ " alloc_head = %p\n",
+ ba, blocks_available,
+ cur_chunk_size, (void *) alloc_head);
+ goto fail;
+ }
+ blocks_allocated += blocks_available;
+ }
+ }
+
+ if (len) {
+ int blocks_available = 0;
+ int last_sz = num_pools(memtype) - 1;
+ blocks_available = vcm_alloc_blocks_avail(memtype, last_sz);
+
+ if (blocks_available > 0) {
+ ba = vcm_alloc_num_blocks(1, memtype, last_sz,
+ alloc_head);
+ if (ba != 1) {
+ vcm_alloc_err("blocks allocated (%i) !="
+ " blocks_available (%i):"
+ " chunk size = %#x,"
+ " alloc_head = %p\n",
+ ba, 1,
+ last_sz,
+ (void *) alloc_head);
+ goto fail;
+ }
+ blocks_allocated += 1;
+ } else {
+ vcm_alloc_err("blocks_available (%#x) <= 1\n",
+ blocks_available);
+ goto fail;
+ }
+ }
+
+ return blocks_allocated;
+fail:
+ vcm_alloc_free_blocks(memtype, alloc_head);
+ return 0;
+}
+EXPORT_SYMBOL(vcm_alloc_max_munch);
diff --git a/arch/arm/mm/vcm_mm.c b/arch/arm/mm/vcm_mm.c
new file mode 100644
index 0000000..dee51fa
--- /dev/null
+++ b/arch/arm/mm/vcm_mm.c
@@ -0,0 +1,253 @@
+/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Architecture-specific VCM functions */
+
+#include <linux/kernel.h>
+#include <linux/vcm_mm.h>
+
+#include <asm/pgtable-hwdef.h>
+#include <asm/tlbflush.h>
+
+#define MRC(reg, processor, op1, crn, crm, op2) \
+__asm__ __volatile__ ( \
+" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 " \n" \
+: "=r" (reg))
+
+#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+
+/* Local type attributes (not the same as VCM) */
+#define ARM_MT_NORMAL 2
+#define ARM_MT_STRONGLYORDERED 0
+#define ARM_MT_DEVICE 1
+
+#define ARM_CP_NONCACHED 0
+#define ARM_CP_WB_WA 1
+#define ARM_CP_WB_NWA 3
+#define ARM_CP_WT_NWA 2
+
+#define smmu_err(a, ...) \
+ pr_err("ERROR %s %i " a, __func__, __LINE__, ##__VA_ARGS__)
+
+#define FL_OFFSET(va) (((va) & 0xFFF00000) >> 20)
+#define SL_OFFSET(va) (((va) & 0xFF000) >> 12)
+
+int vcm_driver_tex_class[4];
+
+static int find_tex_class(int icp, int ocp, int mt, int nos)
+{
+ int i = 0;
+ unsigned int prrr = 0;
+ unsigned int nmrr = 0;
+ int c_icp, c_ocp, c_mt, c_nos;
+
+ RCP15_PRRR(prrr);
+ RCP15_NMRR(nmrr);
+
+ /* There are only 8 classes on this architecture */
+ /* If they add more classes, registers will VASTLY change */
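+ /* Per the ARMv7 TEX remap layout: for class n, PRRR[2n+1:2n] gives
+ * the memory type, PRRR[24+n] the NOSn (not outer shareable) bit,
+ * and NMRR[2n+1:2n] / NMRR[17+2n:16+2n] the inner/outer cache
+ * policy, which is what the loop below decodes.
+ */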
+ for (i = 0; i < 8; i++) {
+ c_nos = prrr & (1 << (i + 24)) ? 1 : 0;
+ c_mt = (prrr & (3 << (i * 2))) >> (i * 2);
+ c_icp = (nmrr & (3 << (i * 2))) >> (i * 2);
+ c_ocp = (nmrr & (3 << (i * 2 + 16))) >> (i * 2 + 16);
+
+ if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
+ return i;
+ }
+ smmu_err("Could not find TEX class for ICP=%d, OCP=%d, MT=%d, NOS=%d\n",
+ icp, ocp, mt, nos);
+
+ /* In reality, we may want to remove this panic. Some classes just */
+ /* will not be available, and will fail in smmu_set_attr */
+ panic("SMMU: Could not determine TEX attribute mapping.\n");
+ return -1;
+}
+
+
+int vcm_setup_tex_classes(void)
+{
+ unsigned int cpu_prrr;
+ unsigned int cpu_nmrr;
+
+ if (!(get_cr() & CR_TRE)) /* No TRE? */
+ panic("TEX remap not enabled, but the SMMU driver needs it!\n");
+
+ RCP15_PRRR(cpu_prrr);
+ RCP15_NMRR(cpu_nmrr);
+
+ vcm_driver_tex_class[VCM_DEV_ATTR_NONCACHED] =
+ find_tex_class(ARM_CP_NONCACHED, ARM_CP_NONCACHED,
+ ARM_MT_NORMAL, 1);
+
+ vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WB_WA] =
+ find_tex_class(ARM_CP_WB_WA, ARM_CP_WB_WA,
+ ARM_MT_NORMAL, 1);
+
+ vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WB_NWA] =
+ find_tex_class(ARM_CP_WB_NWA, ARM_CP_WB_NWA,
+ ARM_MT_NORMAL, 1);
+
+ vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WT] =
+ find_tex_class(ARM_CP_WT_NWA, ARM_CP_WT_NWA,
+ ARM_MT_NORMAL, 1);
+#ifdef DEBUG_TEX
+ printk(KERN_INFO "VCM driver debug: Using TEX classes: %d %d %d %d\n",
+ vcm_driver_tex_class[VCM_DEV_ATTR_NONCACHED],
+ vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WB_WA],
+ vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WB_NWA],
+ vcm_driver_tex_class[VCM_DEV_ATTR_CACHED_WT]);
+#endif
+ return 0;
+}
+
+
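+/*
+ * With TEX remap enabled a mapping's class index is encoded in the
+ * {TEX[0], C, B} bits of its descriptor, so the helpers below rewrite
+ * exactly those bits (plus S for shareability) in place: class bit 0
+ * drives B, bit 1 drives C and bit 2 drives TEX[0], for section,
+ * supersection, large-page and small-page entries alike.
+ */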
+int set_arm7_pte_attr(unsigned long pt_base, unsigned long va,
+ unsigned long len, unsigned int attr)
+{
+ unsigned long *fl_table = NULL;
+ unsigned long *fl_pte = NULL;
+ unsigned long fl_offset = 0;
+ unsigned long *sl_table = NULL;
+ unsigned long *sl_pte = NULL;
+ unsigned long sl_offset = 0;
+ int i;
+ int sh = 0;
+ int class = 0;
+
+ /* Alignment */
+ if (va & (len-1)) {
+ smmu_err("misaligned va: %p\n", (void *) va);
+ goto fail;
+ }
+ if (attr > 7) {
+ smmu_err("bad attribute: %d\n", attr);
+ goto fail;
+ }
+
+ sh = (attr & VCM_DEV_ATTR_SH) ? 1 : 0;
+ class = vcm_driver_tex_class[attr & 0x03];
+
+ if (class > 7 || class < 0) { /* Bad class */
+ smmu_err("bad tex class: %d\n", class);
+ goto fail;
+ }
+
+ if (len != SZ_16M && len != SZ_1M &&
+ len != SZ_64K && len != SZ_4K) {
+ smmu_err("bad size: %lu\n", len);
+ goto fail;
+ }
+
+ fl_table = (unsigned long *) pt_base;
+
+ if (!fl_table) {
+ smmu_err("null page table\n");
+ goto fail;
+ }
+
+ fl_offset = FL_OFFSET(va); /* Upper 12 bits */
+ fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */
+
+ if (*fl_pte == 0) { /* Nothing there! */
+ smmu_err("first level pte is 0\n");
+ goto fail;
+ }
+
+ /* Supersection attributes */
+ if (len == SZ_16M) {
+ for (i = 0; i < 16; i++) {
+ /* Clear the old bits */
+ *(fl_pte+i) &= ~(PMD_SECT_S | PMD_SECT_CACHEABLE |
+ PMD_SECT_BUFFERABLE | PMD_SECT_TEX(1));
+
+ /* Assign new class and S bit */
+ *(fl_pte+i) |= sh ? PMD_SECT_S : 0;
+ *(fl_pte+i) |= class & 0x01 ? PMD_SECT_BUFFERABLE : 0;
+ *(fl_pte+i) |= class & 0x02 ? PMD_SECT_CACHEABLE : 0;
+ *(fl_pte+i) |= class & 0x04 ? PMD_SECT_TEX(1) : 0;
+ }
+ } else if (len == SZ_1M) {
+
+ /* Clear the old bits */
+ *(fl_pte) &= ~(PMD_SECT_S | PMD_SECT_CACHEABLE |
+ PMD_SECT_BUFFERABLE | PMD_SECT_TEX(1));
+
+ /* Assign new class and S bit */
+ *(fl_pte) |= sh ? PMD_SECT_S : 0;
+ *(fl_pte) |= class & 0x01 ? PMD_SECT_BUFFERABLE : 0;
+ *(fl_pte) |= class & 0x02 ? PMD_SECT_CACHEABLE : 0;
+ *(fl_pte) |= class & 0x04 ? PMD_SECT_TEX(1) : 0;
+ }
+
+ sl_table = (unsigned long *) __va(((*fl_pte) & 0xFFFFFC00));
+ sl_offset = SL_OFFSET(va);
+ sl_pte = sl_table + sl_offset;
+
+ if (len == SZ_64K) {
+ for (i = 0; i < 16; i++) {
+ /* Clear the old bits */
+ *(sl_pte+i) &= ~(PTE_EXT_SHARED | PTE_CACHEABLE |
+ PTE_BUFFERABLE | PTE_EXT_TEX(1));
+
+ /* Assign new class and S bit */
+ *(sl_pte+i) |= sh ? PTE_EXT_SHARED : 0;
+ *(sl_pte+i) |= class & 0x01 ? PTE_BUFFERABLE : 0;
+ *(sl_pte+i) |= class & 0x02 ? PTE_CACHEABLE : 0;
+ *(sl_pte+i) |= class & 0x04 ? PTE_EXT_TEX(1) : 0;
+ }
+ } else if (len == SZ_4K) {
+ /* Clear the old bits */
+ *(sl_pte) &= ~(PTE_EXT_SHARED | PTE_CACHEABLE |
+ PTE_BUFFERABLE | PTE_EXT_TEX(1));
+
+ /* Assign new class and S bit */
+ *(sl_pte) |= sh ? PTE_EXT_SHARED : 0;
+ *(sl_pte) |= class & 0x01 ? PTE_BUFFERABLE : 0;
+ *(sl_pte) |= class & 0x02 ? PTE_CACHEABLE : 0;
+ *(sl_pte) |= class & 0x04 ? PTE_EXT_TEX(1) : 0;
+ }
+
+
+ mb();
+ return 0;
+fail:
+ return 1;
+}
+
+
+int cpu_set_attr(unsigned long va, unsigned long len, unsigned int attr)
+{
+ int ret;
+ pgd_t *pgd = init_mm.pgd;
+
+ if (!pgd) {
+ smmu_err("null pgd\n");
+ goto fail;
+ }
+
+ ret = set_arm7_pte_attr((unsigned long)pgd, va, len, attr);
+
+ if (ret != 0) {
+ smmu_err("could not set attribute: pgd=%p, va=%p, len=%lu, attr=%d\n",
+ (void *) pgd, (void *) va, len, attr);
+ goto fail;
+ }
+ dmb();
+ flush_tlb_all();
+ return 0;
+fail:
+ return -1;
+}