| Matt Fleming | 5d9b4b1 | 2009-12-13 14:38:50 +0000 | [diff] [blame^] | 1 | #ifndef __ASM_SH_PGTABLE_PMD_H |
| 2 | #define __ASM_SH_PGTABLE_PMD_H |
| 3 | |
| 4 | #include <asm-generic/pgtable-nopud.h> |
| 5 | |
| 6 | /* |
| 7 | * Some cores need a 3-level page table layout, for example when using |
| 8 | * 64-bit PTEs and 4K pages. |
| 9 | */ |
| 10 | |
/* log2(sizeof(pte_t)): PTEs are 8 bytes wide. */
#define PTE_MAGNITUDE	3	/* 64-bit PTEs on extended mode SH-X2 TLB */

/* PGD bits: each top-level entry spans 1 << 30 bytes (1GB) of VA. */
#define PGDIR_SHIFT	30

/* 4 entries x 1GB covers the whole 32-bit address space. */
#define PTRS_PER_PGD	4
/* First 2 entries (lower 2GB) — presumably the user half; confirm
 * against this platform's user/kernel VA split. */
#define USER_PTRS_PER_PGD	2
| 18 | |
/* PMD bits */
/*
 * One PMD entry spans the range mapped by a single page full of PTEs:
 * PAGE_SHIFT bits of page offset plus (PAGE_SHIFT - 3) bits of PTE
 * index ("- 3" because each PTE is 8 bytes, see PTE_MAGNITUDE).
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* VA span of one PMD entry */
#define PMD_MASK	(~(PMD_SIZE-1))		/* strips the offset within that span */

/* A PMD table is exactly one page of pmd_t entries. */
#define PTRS_PER_PMD	(PAGE_SIZE / sizeof(pmd_t))
| 25 | |
/* Report a corrupt PMD entry; %016llx matches the 64-bit pmd value. */
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))

/* PMD entries are 64 bits wide, matching the 64-bit PTEs they point at. */
typedef struct { unsigned long long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)	/* raw entry value */
#define __pmd(x)	((pmd_t) { (x) } )	/* wrap a raw value as pmd_t */
| 32 | |
| 33 | static inline unsigned long pud_page_vaddr(pud_t pud) |
| 34 | { |
| 35 | return pud_val(pud); |
| 36 | } |
| 37 | |
| 38 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) |
| 39 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) |
| 40 | { |
| 41 | return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); |
| 42 | } |
| 43 | |
/*
 * PUD entry predicates. A valid entry holds the page-aligned kernel VA
 * of a PMD table, so any bits below the page boundary indicate corruption.
 */
#define pud_none(x)	(!pud_val(x))	/* entry is empty */
#define pud_present(x)	(pud_val(x))	/* entry points to a PMD table */
#define pud_clear(xp)	do { set_pud(xp, __pud(0)); } while (0)
#define pud_bad(x)	(pud_val(x) & ~PAGE_MASK)	/* misaligned value */
| 48 | |
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 * The plain assignment is sufficient because the folded level has
 * no hardware-visible side effects of its own.
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
| 54 | |
| 55 | #endif /* __ASM_SH_PGTABLE_PMD_H */ |