#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
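/*
 * For example: with PAGE_SHIFT = 12, PAGE_SIZE is 4096 (0x1000) and
 * PAGE_MASK is ~0xfff, so (addr & PAGE_MASK) keeps the page-frame bits
 * and drops the in-page offset.
 */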

#define PHYSICAL_PAGE_MASK	(PAGE_MASK & __PHYSICAL_MASK)
#define PTE_MASK		PHYSICAL_PAGE_MASK

#define LARGE_PAGE_SIZE		(_AC(1,UL) << PMD_SHIFT)
#define LARGE_PAGE_MASK		(~(LARGE_PAGE_SIZE-1))

#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
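/*
 * Huge pages are PMD-sized: with 4K base pages, PMD_SHIFT is 21 on 64-bit
 * and 32-bit PAE (2MB huge pages, HUGETLB_PAGE_ORDER = 9) and 22 on
 * 32-bit non-PAE (4MB huge pages, HUGETLB_PAGE_ORDER = 10).
 */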

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
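/*
 * For example, with 4K pages:
 *	PAGE_ALIGN(0x12345) == 0x13000   (rounded up to the next boundary)
 *	PAGE_ALIGN(0x13000) == 0x13000   (already aligned)
 */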

#define __PHYSICAL_MASK		_AT(phys_addr_t, (_AC(1,ULL) << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK		((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
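/*
 * For example, on 64-bit (__PHYSICAL_MASK_SHIFT = 46 and
 * __VIRTUAL_MASK_SHIFT = 48, see below) this gives
 * __PHYSICAL_MASK = 0x3fffffffffff and __VIRTUAL_MASK = 0xffffffffffff.
 */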

#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif

#ifdef CONFIG_X86_64
#define PAGETABLE_LEVELS	4

#define THREAD_ORDER	1
#define THREAD_SIZE	(PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK	(~(THREAD_SIZE-1))

#define EXCEPTION_STACK_ORDER	0
#define EXCEPTION_STKSZ		(PAGE_SIZE << EXCEPTION_STACK_ORDER)

#define DEBUG_STACK_ORDER	(EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ		(PAGE_SIZE << DEBUG_STACK_ORDER)

#define IRQSTACK_ORDER	2
#define IRQSTACKSIZE	(PAGE_SIZE << IRQSTACK_ORDER)

#define STACKFAULT_STACK	1
#define DOUBLEFAULT_STACK	2
#define NMI_STACK		3
#define DEBUG_STACK		4
#define MCE_STACK		5
#define N_EXCEPTION_STACKS	5	/* hw limit: 7 */

#define __PAGE_OFFSET		_AC(0xffff810000000000, UL)

#define __PHYSICAL_START	CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN		0x200000

/*
 * Make sure the kernel is aligned to a 2MB address.  Catching this at
 * compile time is better.  Change your config file and compile the
 * kernel for a 2MB-aligned address (CONFIG_PHYSICAL_START).
 */
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif

#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map	_AC(0xffffffff80000000, UL)
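/*
 * For example, with the common default CONFIG_PHYSICAL_START of 0x200000,
 * __START_KERNEL works out to 0xffffffff80200000, i.e. the kernel text is
 * mapped in the top 2GB of the virtual address space.
 */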

/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT	46
#define __VIRTUAL_MASK_SHIFT	48

#define KERNEL_TEXT_SIZE	(40*1024*1024)
#define KERNEL_TEXT_START	_AC(0xffffffff80000000, UL)

#ifndef __ASSEMBLY__
void clear_page(void *page);
void copy_page(void *to, void *from);

/*
 * These are used to get C type-checking.
 */
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;
typedef unsigned long	phys_addr_t;

typedef struct { pteval_t pte; } pte_t;

#define native_pte_val(x)	((x).pte)
#define native_make_pte(x)	((pte_t) { (x) })
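/*
 * Wrapping the value in a single-member struct means pte_t and plain
 * integers do not convert into each other implicitly, so e.g. passing a
 * raw pfn or flags word where a pte_t is expected becomes a compile error
 * instead of a silent bug.
 */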

#endif	/* !__ASSEMBLY__ */

#endif	/* CONFIG_X86_64 */

#ifdef CONFIG_X86_32

/*
 * This handles the memory map.
 *
 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 950MB.
 *
 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
 * and CONFIG_HIGHMEM64G options in the kernel configuration.
 */
#define __PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
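/*
 * With the default __PAGE_OFFSET of 0xC0000000, a low-memory physical
 * address P is mapped linearly at virtual address P + PAGE_OFFSET, which
 * is what the kernel's __pa()/__va() conversions rely on.
 */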

#ifdef CONFIG_X86_PAE
#define __PHYSICAL_MASK_SHIFT	36
#define __VIRTUAL_MASK_SHIFT	32
#define PAGETABLE_LEVELS	3

#ifndef __ASSEMBLY__
typedef u64	pteval_t;
typedef u64	pmdval_t;
typedef u64	pudval_t;
typedef u64	pgdval_t;
typedef u64	pgprotval_t;
typedef u64	phys_addr_t;

typedef struct { unsigned long pte_low, pte_high; } pte_t;

static inline unsigned long long native_pte_val(pte_t pte)
{
	return pte.pte_low | ((unsigned long long)pte.pte_high << 32);
}

static inline pte_t native_make_pte(unsigned long long val)
{
	return (pte_t) { .pte_low = val, .pte_high = (val >> 32) };
}
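/*
 * For example, a PAE entry with the value 0x0000000180000067ULL is stored
 * as pte_low = 0x80000067 and pte_high = 0x1; native_pte_val() reassembles
 * the same 64-bit value from the two halves.
 */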

#endif	/* __ASSEMBLY__ */
#else	/* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT	32
#define __VIRTUAL_MASK_SHIFT	32
#define PAGETABLE_LEVELS	2

#ifndef __ASSEMBLY__
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;
typedef unsigned long	phys_addr_t;

typedef struct { pteval_t pte_low; } pte_t;
typedef pte_t boot_pte_t;

static inline unsigned long native_pte_val(pte_t pte)
{
	return pte.pte_low;
}

static inline pte_t native_make_pte(unsigned long val)
{
	return (pte_t) { .pte_low = val };
}

#endif	/* __ASSEMBLY__ */
#endif	/* CONFIG_X86_PAE */

#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

#ifndef __ASSEMBLY__
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>

static inline void clear_page(void *page)
{
	mmx_clear_page(page);
}

static inline void copy_page(void *to, void *from)
{
	mmx_copy_page(to, from);
}
#else	/* !CONFIG_X86_USE_3DNOW */
#include <linux/string.h>

static inline void clear_page(void *page)
{
	memset(page, 0, PAGE_SIZE);
}

static inline void copy_page(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);
}
#endif	/* CONFIG_X86_USE_3DNOW */
#endif	/* !__ASSEMBLY__ */

#endif	/* CONFIG_X86_32 */

#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)

#define VM_DATA_DEFAULT_FLAGS \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)


#ifndef __ASSEMBLY__
struct page;

static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}

#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd;
}

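/*
 * The set of software page-table types follows PAGETABLE_LEVELS: with
 * fewer than four levels, the asm-generic pgtable-nopud.h / pgtable-nopmd.h
 * headers fold the missing pud/pmd level into the level above, so the
 * generic pud_*()/pmd_*() code still compiles.
 */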
#if PAGETABLE_LEVELS >= 3
#if PAGETABLE_LEVELS == 4
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pudval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else	/* PAGETABLE_LEVELS == 3 */
#include <asm-generic/pgtable-nopud.h>
#endif	/* PAGETABLE_LEVELS == 4 */

typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else	/* PAGETABLE_LEVELS == 2 */
#include <asm-generic/pgtable-nopmd.h>
#endif	/* PAGETABLE_LEVELS >= 3 */

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })

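/*
 * Under CONFIG_PARAVIRT, asm/paravirt.h provides pgd_val()/__pgd() and
 * friends as hooks that a hypervisor backend can intercept; in the native
 * case below they simply map to the native_*() helpers defined above, so
 * the rest of the kernel uses one interface either way.
 */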
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else	/* !CONFIG_PARAVIRT */

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */


#ifdef CONFIG_X86_32
# include "page_32.h"
#else
# include "page_64.h"
#endif

#endif /* _ASM_X86_PAGE_H */