#ifndef ASM_X86__PGTABLE_H
#define ASM_X86__PGTABLE_H

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_FILE		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif
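
/*
 * For illustration only: a present, writable, user-accessible mapping
 * carries _PAGE_PRESENT | _PAGE_RW | _PAGE_USER == 0x7 in its low flag
 * bits, the physical frame number sits in the bits covered by
 * PTE_PFN_MASK, and _PAGE_NX (bit 63), when supported, marks the page
 * non-executable.
 */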

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY /* nonlinear file mapping,
				     * saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE /* if the user mapped it with PROT_NONE;
				     pte_present gives true */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)

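/*
 * Sketch of the encoding above: WB leaves PCD and PWT clear, WC sets only
 * PWT, UC_MINUS sets only PCD, and UC sets both.  The memory type that
 * results also depends on how the PAT MSR has been programmed.
 */
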
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
				 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

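/*
 * Example (illustrative): a private PROT_READ|PROT_WRITE mmap() selects
 * __P011 == PAGE_COPY, which is not writable so the first store faults
 * and can be handled as copy-on-write; the MAP_SHARED equivalent selects
 * __S011 == PAGE_SHARED, which has _PAGE_RW set from the start.
 */
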
/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR	0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	0x063		/* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	0x001		/* PRESENT (no other attributes) */
#endif

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

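/*
 * Sketch with made-up numbers: for a pte whose raw value is 0x1234063
 * (physical page at 0x1234000, low flag bits 0x63), pte_pfn() masks with
 * PTE_PFN_MASK and shifts right by PAGE_SHIFT, giving pfn 0x1234, and
 * pte_page() then returns the struct page for that frame.
 */
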
static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

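/*
 * Illustrative use (hypothetical pfn): pfn_pte(0x1234, PAGE_KERNEL)
 * builds a pte for physical frame 0x1234 carrying PAGE_KERNEL's flag
 * bits, filtered through __supported_pte_mask so that e.g. _PAGE_NX is
 * dropped on CPUs without NX support.
 */
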
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

	return __pte(val);
}

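/*
 * Example (illustrative): pte_modify(pte, PAGE_READONLY) keeps everything
 * in _PAGE_CHG_MASK (the pfn, PCD/PWT, accessed, dirty and special bits)
 * and takes the remaining protection bits, including NX, from
 * PAGE_READONLY, again limited to __supported_pte_mask.
 */
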
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

#ifndef __ASSEMBLY__
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
#endif

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

extern int arch_report_meminfo(char *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

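/*
 * Worked example (illustrative x86-64 values): with PGDIR_SHIFT == 39,
 * PTRS_PER_PGD == 512 and PAGE_OFFSET == 0xffff880000000000,
 * pgd_index(0xffff880000000000) == (0xffff880000000000 >> 39) & 511 == 272,
 * so KERNEL_PGD_BOUNDARY is 272 and KERNEL_PGD_PTRS is 240.
 */
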
#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);

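/*
 * Typical use (sketch):
 *
 *	unsigned int level;
 *	pte_t *kpte = lookup_address(address, &level);
 *
 * A non-NULL kpte with level == PG_LEVEL_2M means the address is covered
 * by a 2MB kernel mapping, and the returned pointer is really a pmd
 * entry viewed as a pte.
 */
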
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 *  dst and src can be on the same page, but the range must not overlap,
 *  and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

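/*
 * Usage sketch (illustrative, names as in pgd-allocation code): the kernel
 * part of the reference page tables can be copied into a new pgd with
 *
 *	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */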

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* ASM_X86__PGTABLE_H */