blob: f65a2ae6e32347d5a03b841f67a900f5b09431e3 [file] [log] [blame]
Jeremy Fitzhardinge83a51012008-01-30 13:32:41 +01001#ifndef _ASM_X86_PAGE_H
2#define _ASM_X86_PAGE_H
3
4#include <linux/const.h>
5
6/* PAGE_SHIFT determines the page size */
7#define PAGE_SHIFT 12
8#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
9#define PAGE_MASK (~(PAGE_SIZE-1))
10
11#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
Ingo Molnar3da1bcc2008-01-30 13:32:42 +010012#define PTE_MASK PHYSICAL_PAGE_MASK
Jeremy Fitzhardinge83a51012008-01-30 13:32:41 +010013
14#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
15#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
16
17#define HPAGE_SHIFT PMD_SHIFT
18#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
19#define HPAGE_MASK (~(HPAGE_SIZE - 1))
20#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
21
22/* to align the pointer to the (next) page boundary */
23#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
24
Ingo Molnar6724a1d2008-01-30 13:32:43 +010025#define __PHYSICAL_MASK _AT(phys_addr_t, (_AC(1,ULL) << __PHYSICAL_MASK_SHIFT) - 1)
Jeremy Fitzhardinge83a51012008-01-30 13:32:41 +010026#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
27
Ingo Molnar3da1bcc2008-01-30 13:32:42 +010028#ifndef __ASSEMBLY__
29#include <linux/types.h>
30#endif
Jeremy Fitzhardinge83a51012008-01-30 13:32:41 +010031
#ifdef CONFIG_X86_64
#define PAGETABLE_LEVELS	4

/* Kernel stack: two pages on 64-bit. */
#define THREAD_ORDER	1
#define THREAD_SIZE	(PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK	(~(THREAD_SIZE-1))

/* Per-CPU exception stack sizes (orders are in pages). */
#define EXCEPTION_STACK_ORDER	0
#define EXCEPTION_STKSZ		(PAGE_SIZE << EXCEPTION_STACK_ORDER)

#define DEBUG_STACK_ORDER	(EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ		(PAGE_SIZE << DEBUG_STACK_ORDER)

#define IRQSTACK_ORDER	2
#define IRQSTACKSIZE	(PAGE_SIZE << IRQSTACK_ORDER)

/* IST indices for the per-CPU exception stacks. */
#define STACKFAULT_STACK	1
#define DOUBLEFAULT_STACK	2
#define NMI_STACK		3
#define DEBUG_STACK		4
#define MCE_STACK		5
#define N_EXCEPTION_STACKS	5	/* hw limit: 7 */

/* Base of the direct (linear) mapping of all physical memory. */
#define __PAGE_OFFSET		_AC(0xffff810000000000, UL)

#define __PHYSICAL_START	CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN		0x200000

/*
 * Make sure kernel is aligned to 2MB address. Catching it at compile
 * time is better. Change your config file and compile the kernel
 * for a 2MB aligned address (CONFIG_PHYSICAL_START)
 */
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif

#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map	_AC(0xffffffff80000000, UL)

/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT	46
#define __VIRTUAL_MASK_SHIFT	48

#define KERNEL_TEXT_SIZE	(40*1024*1024)
#define KERNEL_TEXT_START	_AC(0xffffffff80000000, UL)

#ifndef __ASSEMBLY__
/* Assembly-optimized implementations; see arch page_64 code. */
void clear_page(void *page);
void copy_page(void *to, void *from);

extern unsigned long end_pfn;
extern unsigned long end_pfn_map;
extern unsigned long phys_base;

extern unsigned long __phys_addr(unsigned long);
#define __phys_reloc_hide(x)	(x)

/*
 * These are used to make use of C type-checking..
 */
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;
typedef unsigned long	phys_addr_t;

typedef struct { pteval_t pte; } pte_t;

/* On 64-bit a pte is a single word, so plain macros suffice. */
#define native_pte_val(x)	((x).pte)
#define native_make_pte(x)	((pte_t) { (x) } )

#define vmemmap ((struct page *)VMEMMAP_START)

#endif	/* !__ASSEMBLY__ */

#endif	/* CONFIG_X86_64 */
110
111#ifdef CONFIG_X86_32
112
113/*
114 * This handles the memory map.
115 *
116 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
117 * a virtual address space of one gigabyte, which limits the
118 * amount of physical memory you can use to about 950MB.
119 *
120 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
121 * and CONFIG_HIGHMEM64G options in the kernel configuration.
122 */
123#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
124
125#ifdef CONFIG_X86_PAE
126#define __PHYSICAL_MASK_SHIFT 36
127#define __VIRTUAL_MASK_SHIFT 32
Jeremy Fitzhardinge881d90d2008-01-30 13:32:43 +0100128#define PAGETABLE_LEVELS 3
129
130#ifndef __ASSEMBLY__
131typedef u64 pteval_t;
132typedef u64 pmdval_t;
133typedef u64 pudval_t;
134typedef u64 pgdval_t;
135typedef u64 pgprotval_t;
136typedef u64 phys_addr_t;
137
138typedef struct { unsigned long pte_low, pte_high; } pte_t;
139
140static inline unsigned long long native_pte_val(pte_t pte)
141{
142 return pte.pte_low | ((unsigned long long)pte.pte_high << 32);
143}
144
145static inline pte_t native_make_pte(unsigned long long val)
146{
147 return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ;
148}
149
150#endif /* __ASSEMBLY__
151 */
Jeremy Fitzhardinge83a51012008-01-30 13:32:41 +0100152#else /* !CONFIG_X86_PAE */
153#define __PHYSICAL_MASK_SHIFT 32
154#define __VIRTUAL_MASK_SHIFT 32
Jeremy Fitzhardinge881d90d2008-01-30 13:32:43 +0100155#define PAGETABLE_LEVELS 2
156
#ifndef __ASSEMBLY__
/* Without PAE all page-table values are plain machine words. */
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;
typedef unsigned long	phys_addr_t;

typedef struct { pteval_t pte_low; } pte_t;
typedef pte_t boot_pte_t;

/* Unwrap a pte_t to its raw word. */
static inline unsigned long native_pte_val(pte_t pte)
{
	return pte.pte_low;
}

/* Wrap a raw word into the type-checked pte_t. */
static inline pte_t native_make_pte(unsigned long val)
{
	pte_t pte = { .pte_low = val };

	return pte;
}

#endif	/* __ASSEMBLY__ */
Jeremy Fitzhardinge83a51012008-01-30 13:32:41 +0100179#endif /* CONFIG_X86_PAE */
180
181#ifdef CONFIG_HUGETLB_PAGE
182#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
183#endif
184
Jeremy Fitzhardinge345b9042008-01-30 13:32:42 +0100185#ifndef __ASSEMBLY__
Jeremy Fitzhardinge98fd5ae2008-01-30 13:32:43 +0100186#define __phys_addr(x) ((x)-PAGE_OFFSET)
187#define __phys_reloc_hide(x) RELOC_HIDE((x), 0)
188
189#ifdef CONFIG_FLATMEM
190#define pfn_valid(pfn) ((pfn) < max_mapnr)
191#endif /* CONFIG_FLATMEM */
192
Jeremy Fitzhardingee62f4472008-01-30 13:32:44 +0100193extern int nx_enabled;
194
195/*
196 * This much address space is reserved for vmalloc() and iomap()
197 * as well as fixmap mappings.
198 */
199extern unsigned int __VMALLOC_RESERVE;
200extern int sysctl_legacy_va_layout;
201extern int page_is_ram(unsigned long pagenr);
202
203#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
204#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
205
Jeremy Fitzhardinge345b9042008-01-30 13:32:42 +0100206#ifdef CONFIG_X86_USE_3DNOW
207#include <asm/mmx.h>
208
209static inline void clear_page(void *page)
210{
211 mmx_clear_page(page);
212}
213
214static inline void copy_page(void *to, void *from)
215{
216 mmx_copy_page(to, from);
217}
218#else /* !CONFIG_X86_USE_3DNOW */
219#include <linux/string.h>
220
221static inline void clear_page(void *page)
222{
223 memset(page, 0, PAGE_SIZE);
224}
225
226static inline void copy_page(void *to, void *from)
227{
228 memcpy(to, from, PAGE_SIZE);
229}
230#endif /* CONFIG_X86_3DNOW */
231#endif /* !__ASSEMBLY__ */
232
Jeremy Fitzhardinge83a51012008-01-30 13:32:41 +0100233#endif /* CONFIG_X86_32 */
234
/* Strip the type-checking wrapper for arithmetic use. */
#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)

/* Default protection for data mappings; EXEC only when the personality
 * requests READ_IMPLIES_EXEC. */
#define VM_DATA_DEFAULT_FLAGS \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

241
Jeremy Fitzhardinge345b9042008-01-30 13:32:42 +0100242#ifndef __ASSEMBLY__
243struct page;
244
245static void inline clear_user_page(void *page, unsigned long vaddr,
246 struct page *pg)
247{
248 clear_page(page);
249}
250
251static void inline copy_user_page(void *to, void *from, unsigned long vaddr,
252 struct page *topage)
253{
254 copy_page(to, from);
255}
256
257#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
258 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
259#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
260
Ingo Molnar38f0f122008-01-30 13:32:43 +0100261typedef struct { pgdval_t pgd; } pgd_t;
262typedef struct { pgprotval_t pgprot; } pgprot_t;
263
264static inline pgd_t native_make_pgd(pgdval_t val)
265{
266 return (pgd_t) { val };
267}
268
269static inline pgdval_t native_pgd_val(pgd_t pgd)
270{
271 return pgd.pgd;
272}
273
274#if PAGETABLE_LEVELS >= 3
275#if PAGETABLE_LEVELS == 4
276typedef struct { pudval_t pud; } pud_t;
277
278static inline pud_t native_make_pud(pmdval_t val)
279{
280 return (pud_t) { val };
281}
282
283static inline pudval_t native_pud_val(pud_t pud)
284{
285 return pud.pud;
286}
287#else /* PAGETABLE_LEVELS == 3 */
288#include <asm-generic/pgtable-nopud.h>
289#endif /* PAGETABLE_LEVELS == 4 */
290
291typedef struct { pmdval_t pmd; } pmd_t;
292
293static inline pmd_t native_make_pmd(pmdval_t val)
294{
295 return (pmd_t) { val };
296}
297
298static inline pmdval_t native_pmd_val(pmd_t pmd)
299{
300 return pmd.pmd;
301}
302#else /* PAGETABLE_LEVELS == 2 */
303#include <asm-generic/pgtable-nopmd.h>
304#endif /* PAGETABLE_LEVELS >= 3 */
305
306#define pgprot_val(x) ((x).pgprot)
307#define __pgprot(x) ((pgprot_t) { (x) } )
308
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else	/* !CONFIG_PARAVIRT */
/* Without paravirt, the generic accessors are just the native ones. */

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#endif	/* CONFIG_PARAVIRT */
330
Jeremy Fitzhardinge98fd5ae2008-01-30 13:32:43 +0100331#define __pa(x) __phys_addr((unsigned long)(x))
332/* __pa_symbol should be used for C visible symbols.
333 This seems to be the official gcc blessed way to do such arithmetic. */
334#define __pa_symbol(x) __pa(__phys_reloc_hide((unsigned long)(x)))
335
336#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
337
338#define __boot_va(x) __va(x)
339#define __boot_pa(x) __pa(x)
340
341#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
342#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
343#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
344
Jeremy Fitzhardinge345b9042008-01-30 13:32:42 +0100345#endif /* __ASSEMBLY__ */
346
Jeremy Fitzhardingee62f4472008-01-30 13:32:44 +0100347#include <asm-generic/memory_model.h>
348#include <asm-generic/page.h>
349
350#define __HAVE_ARCH_GATE_AREA 1
Jeremy Fitzhardinge345b9042008-01-30 13:32:42 +0100351
Jeremy Fitzhardinge83a51012008-01-30 13:32:41 +0100352#ifdef CONFIG_X86_32
353# include "page_32.h"
354#else
355# include "page_64.h"
356#endif
357
358#endif /* _ASM_X86_PAGE_H */