powerpc/mm: Split the various pgtable-* headers based on MMU type

This patch moves the PTE format definitions for each MMU type into
separate files, instead of keeping them all in one file. This improves
overall maintainability and will make it easier to add new MMU types.
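
For illustration, the hash MMU PTE bits that the diff below removes from
pgtable-ppc64.h end up in their own pte-*.h header, roughly along these
lines (a sketch only; the bit values are the ones being moved, but the
include guard name is illustrative):

    /* asm/pte-hash64.h -- sketch of the relocated PTE bit definitions */
    #ifndef _ASM_POWERPC_PTE_HASH64_H
    #define _ASM_POWERPC_PTE_HASH64_H

    #define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
    #define _PAGE_USER	0x0002 /* matches one of the PP bits */
    #define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
    #define _PAGE_GUARDED	0x0008
    #define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
    #define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
    #define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
    #define _PAGE_DIRTY	0x0080 /* C: page changed */
    #define _PAGE_ACCESSED	0x0100 /* R: page referenced */
    #define _PAGE_RW	0x0200 /* software: user write access allowed */
    #define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */

    #endif /* _ASM_POWERPC_PTE_HASH64_H */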

Additionally, on 64-bit, I've separated the headers describing the
format of the page table tree (3 vs. 4 levels for 64K vs. 4K pages)
from the headers specific to the PTE format of hash-based processors.
This will make it easier to add support for Book3 "E" 64-bit
implementations.
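
Concretely, the resulting 64-bit include chain looks roughly like this
(a sketch based purely on the includes visible in the diff below):

    /* asm/pgtable-ppc64.h -- sketch of the new include structure */
    #ifdef CONFIG_PPC_64K_PAGES
    #include <asm/pgtable-ppc64-64k.h>	/* page table tree layout, 3 levels */
    #else
    #include <asm/pgtable-ppc64-4k.h>	/* page table tree layout, 4 levels */
    #endif
    /* ... */
    #include <asm/pte-hash64.h>		/* hash processor PTE format */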

There are still some type-related ifdefs in the generic headers, which
we might remove in the long run, but this patch shouldn't result in any
code change, hopefully just definitions being moved around.
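
One example of such a remaining ifdef, visible in the hunk below, is the
page size index helper that still keys off CONFIG_PPC_HAS_HASH_64K in
the generic header:

    #ifdef CONFIG_PPC_HAS_HASH_64K
    #define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
    #else
    #define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
    #endif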

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index c627877..5420738 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -11,9 +11,9 @@
 #endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_PPC_64K_PAGES
-#include <asm/pgtable-64k.h>
+#include <asm/pgtable-ppc64-64k.h>
 #else
-#include <asm/pgtable-4k.h>
+#include <asm/pgtable-ppc64-4k.h>
 #endif
 
 #define FIRST_USER_ADDRESS	0
@@ -25,6 +25,8 @@
                 	    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
 #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
 
+
+/* Some sanity checking */
 #if TASK_SIZE_USER64 > PGTABLE_RANGE
 #error TASK_SIZE_USER64 exceeds pagetable range
 #endif
@@ -33,7 +35,6 @@
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
 
-
 /*
  * Define the address range of the vmalloc VM area.
  */
@@ -76,29 +77,26 @@
 
 
 /*
- * Common bits in a linux-style PTE.  These match the bits in the
- * (hardware-defined) PowerPC PTE as closely as possible. Additional
- * bits may be defined in pgtable-*.h
+ * Include the PTE bits definitions
  */
-#define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
-#define _PAGE_USER	0x0002 /* matches one of the PP bits */
-#define _PAGE_FILE	0x0002 /* (!present only) software: pte holds file offset */
-#define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
-#define _PAGE_GUARDED	0x0008
-#define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
-#define _PAGE_DIRTY	0x0080 /* C: page changed */
-#define _PAGE_ACCESSED	0x0100 /* R: page referenced */
-#define _PAGE_RW	0x0200 /* software: user write access allowed */
-#define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */
+#include <asm/pte-hash64.h>
 
-/* Strong Access Ordering */
-#define _PAGE_SAO	(_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
+/* To make some generic powerpc code happy */
+#ifndef _PAGE_HWEXEC
+#define _PAGE_HWEXEC		0
+#endif
 
-#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
+/* Some other useful definitions */
+#define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
 
-#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)
+/* _PAGE_CHG_MASK is the mask of bits that are to be preserved across
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+                         _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+
 
 /* __pgprot defined in arch/powerpc/include/asm/page.h */
 #define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
@@ -117,16 +115,9 @@
 #define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
 #define HAVE_PAGE_AGP
 
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
-			 _PAGE_NO_CACHE | _PAGE_WRITETHRU |		\
-			 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER |		\
-			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
-/* PTEIDX nibble */
-#define _PTEIDX_SECONDARY	0x8
-#define _PTEIDX_GROUP_IX	0x7
+/* We always have _PAGE_SPECIAL on 64 bit */
+#define __HAVE_ARCH_PTE_SPECIAL
 
-/* To make some generic powerpc code happy */
-#define _PAGE_HWEXEC		0
 
 /*
  * POWER4 and newer have per page execute protection, older chips can only
@@ -163,6 +154,38 @@
 #ifndef __ASSEMBLY__
 
 /*
+ * This is the default implementation of various PTE accessors, it's
+ * used in all cases except Book3S with 64K pages where we have a
+ * concept of sub-pages
+ */
+#ifndef __real_pte
+
+#ifdef STRICT_MM_TYPECHECKS
+#define __real_pte(e,p)		((real_pte_t){(e)})
+#define __rpte_to_pte(r)	((r).pte)
+#else
+#define __real_pte(e,p)		(e)
+#define __rpte_to_pte(r)	(__pte(r))
+#endif
+#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)
+
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
+	do {							         \
+		index = 0;					         \
+		shift = mmu_psize_defs[psize].shift;		         \
+
+#define pte_iterate_hashed_end() } while(0)
+
+#ifdef CONFIG_PPC_HAS_HASH_64K
+#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
+#else
+#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
+#endif
+
+#endif /* __real_pte */
+
+
+/*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  *