Fix compilation and bring the 32-bit and 64-bit pgtable variants more in line.
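
Remove the stray PMD_SIZE/PMD_MASK definitions from pgtable-32.h, define
VMALLOC_START in terms of MAP_BASE on both 32-bit (formerly KSEG2) and
64-bit (formerly XKSEG), mask pgd_index() with PTRS_PER_PGD-1 in the
32-bit variant as the 64-bit one already does, and add a __pud_offset()
helper to both headers.  Also fix a typo and a pgd/pud mixup in the
pgtable-64.h comments.
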
Signed-off-by: Thiemo Seufer <ths@networkno.de>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index 8d66303..9b4d39d 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -43,10 +43,6 @@
* works even with the cache aliasing problem the R4k and above have.
*/
-/* PMD_SHIFT determines the size of the area a second-level page table can map */
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef CONFIG_64BIT_PHYS_ADDR
#define PGDIR_SHIFT 21
@@ -78,7 +74,7 @@
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
-#define VMALLOC_START KSEG2
+#define VMALLOC_START MAP_BASE
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
@@ -146,12 +142,13 @@
#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
#define __pgd_offset(address) pgd_index(address)
+#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
/* to find an entry in a page-table-directory */
#define pgd_offset(mm,addr) ((mm)->pgd + pgd_index(addr))
diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h
index ac5517f..3500725 100644
--- a/include/asm-mips/pgtable-64.h
+++ b/include/asm-mips/pgtable-64.h
@@ -59,7 +59,7 @@
* two levels would be easy to implement.
*
* For 16kB page size we use a 2 level page tree which permits a total of
- * 36 bits of virtual address space. We could add a third leve. but it seems
+ * 36 bits of virtual address space. We could add a third level, but it seems
* like at the moment there's no need for this.
*
* For 64kB page size we use a 2 level page table tree for a total of 42 bits
@@ -97,7 +97,7 @@
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
-#define VMALLOC_START XKSEG
+#define VMALLOC_START MAP_BASE
#define VMALLOC_END \
(VMALLOC_START + PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE)
@@ -134,7 +134,7 @@
}
/*
- * Empty pgd entries point to the invalid_pmd_table.
+ * Empty pud entries point to the invalid_pmd_table.
*/
static inline int pud_none(pud_t pud)
{
@@ -166,12 +166,13 @@
#endif
#define __pgd_offset(address) pgd_index(address)
+#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define page_pte(page) page_pte_prot(page, __pgprot(0))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, 0)
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
/* to find an entry in a page-table-directory */
#define pgd_offset(mm,addr) ((mm)->pgd + pgd_index(addr))
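
A note for reviewers, not part of the patch: the 36- and 42-bit figures in
the pgtable-64.h comment follow from the page geometry.  With 8-byte entries
a 16kB page holds 2^11 pointers, so two levels plus the 14-bit page offset
cover 11 + 11 + 14 = 36 bits; a 64kB page holds 2^13 pointers, for
13 + 13 + 16 = 42 bits.

The indexing macros touched here compose in the usual 2.6-era page table
walk.  The sketch below is illustrative only: walk_pte() is a made-up name,
it assumes the generic pgd_none()/pud_offset()/pmd_offset()/
pte_offset_kernel() interface available in kernel context via
<asm/pgtable.h>, and error handling is reduced to NULL returns.

static pte_t *walk_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);	/* mm->pgd + pgd_index(address) */
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);		/* folded to the pgd on 32-bit */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);		/* folded on 32-bit as well */
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);	/* final level: the pte itself */
}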