xtensa: nommu support

Add support for !CONFIG_MMU setups: move the MMU-specific parts of
arch/xtensa/mm/init.c (paging_init(), init_mmu() and the pgd slab
cache) into a new mmu.c, split zone setup out into zones_init(), and
build fault.o, mmu.o and tlb.o only when CONFIG_MMU is enabled.  The
TLB temporary-mapping code in misc.S is likewise guarded by
CONFIG_MMU.

Signed-off-by: Johannes Weiner <jw@emlix.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
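
An illustrative caller sketch (not part of this patch) of how the split
is meant to be used; the function name hypothetical_mm_setup() and the
exact call ordering are assumptions for illustration only, not the
actual xtensa setup path:

	void __init hypothetical_mm_setup(void)
	{
	#ifdef CONFIG_MMU
		init_mmu();	/* reset <t>TLBCFG, RASID and PTEVADDR */
		paging_init();	/* clear swapper_pg_dir */
	#endif
		zones_init();	/* free_area_init_node() runs in all configs */
	}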
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index 64e304a..f0b646d 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -2,4 +2,5 @@
 # Makefile for the Linux/Xtensa-specific parts of the memory manager.
 #
 
-obj-y	 := init.o fault.o tlb.o misc.o cache.o
+obj-y			:= init.o cache.o misc.o
+obj-$(CONFIG_MMU)	+= fault.o mmu.o tlb.o
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 6190988..427e14f 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -24,15 +24,8 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 
-#include <asm/pgtable.h>
 #include <asm/bootparam.h>
-#include <asm/mmu_context.h>
-#include <asm/tlb.h>
 #include <asm/page.h>
-#include <asm/pgalloc.h>
-
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 /* References to section boundaries */
 
@@ -160,7 +153,7 @@
 }
 
 
-void __init paging_init(void)
+void __init zones_init(void)
 {
 	unsigned long zones_size[MAX_NR_ZONES];
 	int i;
@@ -175,43 +168,10 @@
 	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
 #endif
 
-	/* Initialize the kernel's page tables. */
-
-	memset(swapper_pg_dir, 0, PAGE_SIZE);
-
 	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
 /*
- * Flush the mmu and reset associated register to default values.
- */
-
-void __init init_mmu (void)
-{
-	/* Writing zeros to the <t>TLBCFG special registers ensure
-	 * that valid values exist in the register.  For existing
-	 * PGSZID<w> fields, zero selects the first element of the
-	 * page-size array.  For nonexistent PGSZID<w> fields, zero is
-	 * the best value to write.  Also, when changing PGSZID<w>
-	 * fields, the corresponding TLB must be flushed.
-	 */
-	set_itlbcfg_register (0);
-	set_dtlbcfg_register (0);
-	flush_tlb_all ();
-
-	/* Set rasid register to a known value. */
-
-	set_rasid_register (ASID_USER_FIRST);
-
-	/* Set PTEVADDR special register to the start of the page
-	 * table, which is in kernel mappable space (ie. not
-	 * statically mapped).  This register's value is undefined on
-	 * reset.
-	 */
-	set_ptevaddr_register (PGTABLE_START);
-}
-
-/*
  * Initialize memory pages.
  */
 
@@ -281,23 +241,3 @@
 	printk("Freeing unused kernel memory: %dk freed\n",
 	       (&__init_end - &__init_begin) >> 10);
 }
-
-struct kmem_cache *pgtable_cache __read_mostly;
-
-static void pgd_ctor(void* addr)
-{
-	pte_t* ptep = (pte_t*)addr;
-	int i;
-
-	for (i = 0; i < 1024; i++, ptep++)
-		pte_clear(NULL, 0, ptep);
-
-}
-
-void __init pgtable_cache_init(void)
-{
-	pgtable_cache = kmem_cache_create("pgd",
-			PAGE_SIZE, PAGE_SIZE,
-			SLAB_HWCACHE_ALIGN,
-			pgd_ctor);
-}
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index c885664..b048406 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -84,6 +84,7 @@
 
 	retw
 
+#ifdef CONFIG_MMU
 /*
  * If we have to deal with cache aliasing, we use temporary memory mappings
  * to ensure that the source and destination pages have the same color as
@@ -311,6 +312,7 @@
 /* End of special treatment in tlb miss exception */
 
 ENTRY(__tlbtemp_mapping_end)
+#endif /* CONFIG_MMU */
 
 /*
  * void __invalidate_icache_page(ulong start)
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
new file mode 100644
index 0000000..4bb91a9
--- /dev/null
+++ b/arch/xtensa/mm/mmu.c
@@ -0,0 +1,70 @@
+/*
+ * xtensa mmu stuff
+ *
+ * Extracted from init.c
+ */
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/cache.h>
+
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+void __init paging_init(void)
+{
+	memset(swapper_pg_dir, 0, PAGE_SIZE);
+}
+
+/*
+ * Flush the mmu and reset associated registers to default values.
+ */
+void __init init_mmu(void)
+{
+	/* Writing zeros to the <t>TLBCFG special registers ensures
+	 * that valid values exist in the register.  For existing
+	 * PGSZID<w> fields, zero selects the first element of the
+	 * page-size array.  For nonexistent PGSZID<w> fields, zero is
+	 * the best value to write.  Also, when changing PGSZID<w>
+	 * fields, the corresponding TLB must be flushed.
+	 */
+	set_itlbcfg_register(0);
+	set_dtlbcfg_register(0);
+	flush_tlb_all();
+
+	/* Set rasid register to a known value. */
+
+	set_rasid_register(ASID_USER_FIRST);
+
+	/* Set PTEVADDR special register to the start of the page
+	 * table, which is in kernel mappable space (i.e. not
+	 * statically mapped).  This register's value is undefined on
+	 * reset.
+	 */
+	set_ptevaddr_register(PGTABLE_START);
+}
+
+struct kmem_cache *pgtable_cache __read_mostly;
+
+static void pgd_ctor(void *addr)
+{
+	pte_t *ptep = (pte_t *)addr;
+	int i;
+
+	for (i = 0; i < 1024; i++, ptep++)	/* 1024 == PAGE_SIZE / sizeof(pte_t) */
+		pte_clear(NULL, 0, ptep);
+
+}
+
+void __init pgtable_cache_init(void)
+{
+	pgtable_cache = kmem_cache_create("pgd",
+			PAGE_SIZE, PAGE_SIZE,
+			SLAB_HWCACHE_ALIGN,
+			pgd_ctor);
+}