/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/config.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>

/*
 * Each address space has 2 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pgde is initialized to point to
 * invalid_pmd_table, each pmde is initialized to point to
 * invalid_pte_table, each pte is initialized to 0. When memory is low,
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * and empty_bad_page_table are returned to higher layer code, so that
 * the failure is recognized later on. Linux does not seem to handle
 * these failures very well, though. The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in swapper_pg_dir. The
 * layout is identical to userspace except it's indexed with the fault
 * address - VMALLOC_START.
 */
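
/*
 * Worked example, assuming the CONFIG_PAGE_SIZE_4KB layout described
 * above: a pte table holds 4096 / 8 == 512 ptes, so it maps 512 * 4kB
 * == 2MB; a pmd table maps 512 * 2MB == 1GB; the two-page pgd holds
 * 1024 entries, for 1024 * 1GB == 2^40 bytes of virtual address space.
 */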

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))

/*
 * PGDIR_SHIFT determines what a third-level page table entry can map.
 * Both shifts assume order-0 pmd and pte tables, which all the page
 * size configurations below use.
 */
#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
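
/*
 * For example, with 4kB pages: PMD_SHIFT == 12 + (12 - 3) == 21, so a
 * pmd entry maps a 2MB area, and PGDIR_SHIFT == 21 + (12 - 3) == 30,
 * so a pgd entry maps a 1GB area.
 */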

/*
 * For the 4kB page size we use a 3 level page tree and an 8kB pgd,
 * which permits mapping 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that
 * seemed rather pointless.
 *
 * For the 8kB page size we use a 3 level page tree which permits a
 * total of 8TB of address space. Alternatively a 33-bit / 8GB
 * organization using two levels would be easy to implement.
 *
 * For the 16kB page size we use a 2 level page tree which permits a
 * total of 36 bits of virtual address space. We could add a third
 * level, but at the moment there seems to be no need for it.
 *
 * For the 64kB page size we use a 2 level page table tree for a total
 * of 42 bits of virtual address space.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER 1
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER 0
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PGD_ORDER 0
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER 0
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif

#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
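
/*
 * With 4kB pages, for instance, the definitions above work out to
 * PTRS_PER_PGD == (4096 << 1) / 8 == 1024 and PTRS_PER_PMD ==
 * PTRS_PER_PTE == 4096 / 8 == 512.
 */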

#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0

#define VMALLOC_START XKSEG
#define VMALLOC_END \
	(VMALLOC_START + PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE)
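
/*
 * With 4kB pages that is 1024 * 512 * 512 * 4096 == 2^40 bytes of
 * vmalloc space above XKSEG, i.e. the full reach of one page table
 * tree.
 */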

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
extern pte_t empty_bad_page_table[PAGE_SIZE/sizeof(pte_t)];
extern pmd_t invalid_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
extern pmd_t empty_bad_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

/*
 * Empty pgd entries point to the invalid_pmd_table.
 */
static inline int pgd_none(pgd_t pgd)
{
	return pgd_val(pgd) == (unsigned long) invalid_pmd_table;
}

#define pgd_bad(pgd) (pgd_val(pgd) & ~PAGE_MASK)

static inline int pgd_present(pgd_t pgd)
{
	return pgd_val(pgd) != (unsigned long) invalid_pmd_table;
}

static inline void pgd_clear(pgd_t *pgdp)
{
	pgd_val(*pgdp) = ((unsigned long) invalid_pmd_table);
}
#define pte_page(x) pfn_to_page(pte_pfn(x))
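/*
 * The VR41xx parts keep the physical page number at bit PAGE_SHIFT + 2
 * of the pte instead of bit PAGE_SHIFT, so their pfn conversions shift
 * by two extra bits.
 */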
#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x) ((unsigned long)((x).pte >> PAGE_SHIFT))
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif

#define __pgd_offset(address) pgd_index(address)
#define page_pte(page) page_pte_prot(page, __pgprot(0))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, 0)

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))

static inline unsigned long pgd_page(pgd_t pgd)
{
	return pgd_val(pgd);
}

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) +
	       ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) \
	((pte_t *) (pmd_page_kernel(*dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address) \
	((pte_t *) page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *) page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
#define pte_unmap_nested(pte) ((void)(pte))
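
/*
 * A minimal sketch of how the helpers above compose into a software
 * table walk; illustrative only, not part of this header:
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd;
 *
 *	if (pgd_none(*pgd) || pgd_bad(*pgd))
 *		return NULL;
 *	pmd = pmd_offset(pgd, address);
 *	if (pmd_none(*pmd) || pmd_bad(*pmd))
 *		return NULL;
 *	return pte_offset_kernel(pmd, address);
 */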

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages: high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = (type << 32) | (offset << 40);
	return pte;
}

#define __swp_type(x) (((x).val >> 32) & 0xff)
#define __swp_offset(x) ((x).val >> 40)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
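
/*
 * For example (a sketch): __swp_entry(5, 0x1234) packs type 5 into bits
 * 32..39 and offset 0x1234 into bits 40..63; __swp_type() then yields 5
 * and __swp_offset() yields 0x1234 again, while the low 32 bits stay
 * zero so the entry can never look like a present pte.
 */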

/*
 * Bits 0, 1, 2, 7 and 8 are taken, split up the 32 bits of offset
 * into this range:
 */
#define PTE_FILE_MAX_BITS 32

#define pte_to_pgoff(_pte) \
	((((_pte).pte >> 3) & 0x1f) + (((_pte).pte >> 9) << 6))

#define pgoff_to_pte(off) \
	((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE })
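
/*
 * Worked example, assuming _PAGE_FILE sits among the taken low bits:
 * for off == 0x41, pgoff_to_pte() stores ((0x41 & 0x1f) << 3) == 0x08
 * plus ((0x41 >> 6) << 9) == 0x200, and pte_to_pgoff() recovers
 * ((0x208 >> 3) & 0x1f) + ((0x208 >> 9) << 6) == 0x01 + 0x40 == 0x41.
 * Note that bit 5 of the offset has no slot in this packing.
 */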

#endif /* _ASM_PGTABLE_64_H */