#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_FILE		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
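
/*
 * Added note (illustrative, not part of the original header): bits 6 and
 * 7 are deliberately overloaded.  _PAGE_BIT_DIRTY and _PAGE_BIT_FILE
 * share bit 6 (the FILE meaning applies only when _PAGE_PRESENT is
 * clear; see "If _PAGE_PRESENT is clear" below), and _PAGE_BIT_PSE and
 * _PAGE_BIT_PAT share bit 7 (PAT on 4KB entries, PSE on large ones).
 * A hypothetical sketch of decoding bit 6:
 *
 *	if (pte_val(pte) & _PAGE_PRESENT)
 *		dirty = pte_val(pte) & _PAGE_DIRTY;	/- bit 6 = dirty -/
 *	else
 *		file = pte_val(pte) & _PAGE_FILE;	/- bit 6 = file pte -/
 */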

/*
 * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a
 * sign-extended value on 32-bit with all 1's in the upper word,
 * which preserves the upper pte values on 64-bit ptes:
 */
#define _PAGE_PRESENT	(_AC(1, L)<<_PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AC(1, L)<<_PAGE_BIT_RW)
#define _PAGE_USER	(_AC(1, L)<<_PAGE_BIT_USER)
#define _PAGE_PWT	(_AC(1, L)<<_PAGE_BIT_PWT)
#define _PAGE_PCD	(_AC(1, L)<<_PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AC(1, L)<<_PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AC(1, L)<<_PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AC(1, L)<<_PAGE_BIT_PSE)	/* 2MB page */
#define _PAGE_GLOBAL	(_AC(1, L)<<_PAGE_BIT_GLOBAL)	/* Global TLB entry */
#define _PAGE_UNUSED1	(_AC(1, L)<<_PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2	(_AC(1, L)<<_PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3	(_AC(1, L)<<_PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AC(1, L)<<_PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AC(1, L)<<_PAGE_BIT_PAT_LARGE)

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AC(1, ULL) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	0
#endif

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping,
					 * saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
					   pte_present gives true */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
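
/*
 * Added summary (illustrative, not from the original header): the
 * PCD/PWT pair in an entry selects its memory type:
 *
 *	PCD PWT		mode
 *	 0   0		write-back	(_PAGE_CACHE_WB)
 *	 0   1		write-combining	(_PAGE_CACHE_WC, assumes the PAT
 *					 MSR has been reprogrammed for WC)
 *	 1   0		uncached-minus	(_PAGE_CACHE_UC_MINUS)
 *	 1   1		uncached	(_PAGE_CACHE_UC)
 *
 * A hypothetical caller requesting write-combining might build:
 *
 *	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CACHE_WC);
 */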

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#ifdef CONFIG_X86_32
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL	(_PAGE_KERNEL_EXEC | _PAGE_NX)

#ifndef __ASSEMBLY__
extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#endif	/* __ASSEMBLY__ */
#else
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL	(__PAGE_KERNEL_EXEC | _PAGE_NX)
#endif

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#ifdef CONFIG_X86_32
# define MAKE_GLOBAL(x)			__pgprot((x))
#else
# define MAKE_GLOBAL(x)			__pgprot((x) | _PAGE_GLOBAL)
#endif

#define PAGE_KERNEL			MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			MAKE_GLOBAL(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
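
/*
 * Added note (a sketch, not from the original header): MAKE_GLOBAL tags
 * kernel mappings with _PAGE_GLOBAL on 64-bit so they survive TLB
 * flushes at context switch; 32-bit leaves the flag out here and the
 * runtime __PAGE_KERNEL variables declared above carry it instead once
 * the CPU is known to support it.  On 64-bit, for example, PAGE_KERNEL
 * expands to roughly:
 *
 *	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
 *		 _PAGE_ACCESSED | _PAGE_NX | _PAGE_GLOBAL)
 */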

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
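
/*
 * Added note (illustrative, not from the original header): __Pxxx is the
 * protection map for private (MAP_PRIVATE) mappings, __Sxxx for shared
 * (MAP_SHARED) ones, indexed by the execute/write/read bits of the mmap
 * protections.  A private PROT_READ|PROT_WRITE mapping, for instance,
 * resolves to __P011 == PAGE_COPY: _PAGE_RW is withheld so the first
 * store faults and triggers copy-on-write, while the shared equivalent
 * __S011 == PAGE_SHARED keeps _PAGE_RW set.
 */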
147
Jeremy Fitzhardinge46141392008-01-30 13:32:56 +0100148#ifndef __ASSEMBLY__
Jeremy Fitzhardinge195466d2008-01-30 13:32:58 +0100149
Jeremy Fitzhardinge46141392008-01-30 13:32:56 +0100150/*
Jeremy Fitzhardinge8405b122008-01-30 13:32:58 +0100151 * ZERO_PAGE is a global shared page that is always zero: used
152 * for zero-mapped memory areas etc..
153 */
Joe Perches3cbaeaf2008-03-23 01:03:12 -0700154extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
Jeremy Fitzhardinge8405b122008-01-30 13:32:58 +0100155#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
156
Jeremy Fitzhardingee3ed9102008-01-30 13:34:11 +0100157extern spinlock_t pgd_lock;
158extern struct list_head pgd_list;
Jeremy Fitzhardinge8405b122008-01-30 13:32:58 +0100159
160/*
Jeremy Fitzhardinge46141392008-01-30 13:32:56 +0100161 * The following only work if pte_present() is true.
162 * Undefined behaviour if not..
163 */
Joe Perches3cbaeaf2008-03-23 01:03:12 -0700164static inline int pte_dirty(pte_t pte)
165{
166 return pte_val(pte) & _PAGE_DIRTY;
Jeremy Fitzhardinge46141392008-01-30 13:32:56 +0100167}
168
Joe Perches3cbaeaf2008-03-23 01:03:12 -0700169static inline int pte_young(pte_t pte)
170{
171 return pte_val(pte) & _PAGE_ACCESSED;
172}
173
174static inline int pte_write(pte_t pte)
175{
176 return pte_val(pte) & _PAGE_RW;
177}
178
179static inline int pte_file(pte_t pte)
180{
181 return pte_val(pte) & _PAGE_FILE;
182}
183
184static inline int pte_huge(pte_t pte)
185{
186 return pte_val(pte) & _PAGE_PSE;
187}
188
189static inline int pte_global(pte_t pte)
190{
191 return pte_val(pte) & _PAGE_GLOBAL;
192}
193
194static inline int pte_exec(pte_t pte)
195{
196 return !(pte_val(pte) & _PAGE_NX);
197}
198
Nick Piggin7e675132008-04-28 02:13:00 -0700199static inline int pte_special(pte_t pte)
200{
201 return 0;
202}
203
Joe Perches3cbaeaf2008-03-23 01:03:12 -0700204static inline int pmd_large(pmd_t pte)
205{
206 return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
207 (_PAGE_PSE | _PAGE_PRESENT);
208}
209
210static inline pte_t pte_mkclean(pte_t pte)
211{
212 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
213}
214
215static inline pte_t pte_mkold(pte_t pte)
216{
217 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
218}
219
220static inline pte_t pte_wrprotect(pte_t pte)
221{
222 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
223}
224
225static inline pte_t pte_mkexec(pte_t pte)
226{
227 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
228}
229
230static inline pte_t pte_mkdirty(pte_t pte)
231{
232 return __pte(pte_val(pte) | _PAGE_DIRTY);
233}
234
235static inline pte_t pte_mkyoung(pte_t pte)
236{
237 return __pte(pte_val(pte) | _PAGE_ACCESSED);
238}
239
240static inline pte_t pte_mkwrite(pte_t pte)
241{
242 return __pte(pte_val(pte) | _PAGE_RW);
243}
244
245static inline pte_t pte_mkhuge(pte_t pte)
246{
247 return __pte(pte_val(pte) | _PAGE_PSE);
248}
249
250static inline pte_t pte_clrhuge(pte_t pte)
251{
252 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
253}
254
255static inline pte_t pte_mkglobal(pte_t pte)
256{
257 return __pte(pte_val(pte) | _PAGE_GLOBAL);
258}
259
260static inline pte_t pte_clrglobal(pte_t pte)
261{
262 return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
263}
Jeremy Fitzhardinge46141392008-01-30 13:32:56 +0100264
Nick Piggin7e675132008-04-28 02:13:00 -0700265static inline pte_t pte_mkspecial(pte_t pte)
266{
267 return pte;
268}
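
/*
 * Added usage sketch (not part of the original header): the predicates
 * and pte_mk*()/pte_clr*() helpers above are pure value transforms; they
 * return a new pte_t rather than touching the page tables.  A
 * hypothetical caller softening a live mapping might write:
 *
 *	pte_t pte = *ptep;
 *	if (pte_present(pte) && pte_write(pte))
 *		set_pte(ptep, pte_wrprotect(pte_mkclean(pte)));
 */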

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}
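
/*
 * Added usage sketch (not from the original header): pfn_pte() combines a
 * page frame number with protection bits, masking off anything this CPU
 * does not support (e.g. NX, as tracked in __supported_pte_mask).  A
 * hypothetical caller installing a kernel mapping for one frame:
 *
 *	pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 *	set_pte_at(mm, addr, ptep, pte);
 */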

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Keep the pfn and the cache/accessed/dirty state covered by
	 * _PAGE_CHG_MASK, and take every other bit (including NX, if
	 * supported) from the newprot:
	 */
	val &= _PAGE_CHG_MASK;
	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

	return __pte(val);
}
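
/*
 * Added worked example (illustrative, not in the original): given a
 * dirty, accessed, write-back pte, pte_modify(pte, PAGE_READONLY) keeps
 * the pfn, PCD/PWT, accessed and dirty bits and replaces the rest, so
 * _PAGE_RW is dropped while the caching mode survives -- the behaviour
 * mprotect() needs (see pgprot_modify() below for the vm_page_prot side).
 */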

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_val(x) & (0xfff | _PAGE_NX))

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

#ifndef __ASSEMBLY__
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
#endif

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
};

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
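
/*
 * Added usage sketch (not from the original header): a hypothetical
 * caller probing how an address is currently mapped:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(address, &level);
 *
 *	if (pte && level == PG_LEVEL_2M)
 *		;	/- 'address' sits under a 2MB (PSE) mapping -/
 */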
Thomas Gleixner0a663082008-01-30 13:34:04 +0100380
Jeremy Fitzhardinge48916452008-01-30 13:32:58 +0100381/* local pte updates need not use xchg for locking */
382static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
383{
384 pte_t res = *ptep;
385
386 /* Pure native function needs no input for mm, addr */
387 native_pte_clear(NULL, 0, ptep);
388 return res;
389}
390
391static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
392 pte_t *ptep , pte_t pte)
393{
394 native_set_pte(ptep, pte);
395}
396
Jeremy Fitzhardinge195466d2008-01-30 13:32:58 +0100397#ifndef CONFIG_PARAVIRT
398/*
399 * Rules for using pte_update - it must be called after any PTE update which
400 * has not been done using the set_pte / clear_pte interfaces. It is used by
401 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
402 * updates should either be sets, clears, or set_pte_atomic for P->P
403 * transitions, which means this hook should only be called for user PTEs.
404 * This hook implies a P->P protection or access change has taken place, which
405 * requires a subsequent TLB flush. The notification can optionally be delayed
406 * until the TLB flush event by using the pte_update_defer form of the
407 * interface, but care must be taken to assure that the flush happens while
408 * still holding the same page table lock so that the shadow and primary pages
409 * do not become out of sync on SMP.
410 */
411#define pte_update(mm, addr, ptep) do { } while (0)
412#define pte_update_defer(mm, addr, ptep) do { } while (0)
413#endif
414
Jeremy Fitzhardinge195466d2008-01-30 13:32:58 +0100415/*
416 * We only update the dirty/accessed state if we set
417 * the dirty bit by hand in the kernel, since the hardware
418 * will do the accessed bit for us, and we don't want to
419 * race with other CPU's that might be updating the dirty
420 * bit at the same time.
421 */
422#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
Jeremy Fitzhardingeee5aa8d2008-03-17 16:37:03 -0700423extern int ptep_set_access_flags(struct vm_area_struct *vma,
424 unsigned long address, pte_t *ptep,
425 pte_t entry, int dirty);
Jeremy Fitzhardinge195466d2008-01-30 13:32:58 +0100426
427#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
Jeremy Fitzhardingef9fbf1a2008-03-17 16:37:04 -0700428extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
429 unsigned long addr, pte_t *ptep);
Jeremy Fitzhardinge195466d2008-01-30 13:32:58 +0100430
431#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
Jeremy Fitzhardingec20311e2008-03-17 16:37:05 -0700432extern int ptep_clear_flush_young(struct vm_area_struct *vma,
433 unsigned long address, pte_t *ptep);
Jeremy Fitzhardinge195466d2008-01-30 13:32:58 +0100434
435#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
Joe Perches3cbaeaf2008-03-23 01:03:12 -0700436static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
437 pte_t *ptep)
Jeremy Fitzhardinge195466d2008-01-30 13:32:58 +0100438{
439 pte_t pte = native_ptep_get_and_clear(ptep);
440 pte_update(mm, addr, ptep);
441 return pte;
442}
443
444#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
Joe Perches3cbaeaf2008-03-23 01:03:12 -0700445static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
446 unsigned long addr, pte_t *ptep,
447 int full)
Jeremy Fitzhardinge195466d2008-01-30 13:32:58 +0100448{
449 pte_t pte;
450 if (full) {
451 /*
452 * Full address destruction in progress; paravirt does not
453 * care about updates and native needs no locking
454 */
455 pte = native_local_ptep_get_and_clear(ptep);
456 } else {
457 pte = ptep_get_and_clear(mm, addr, ptep);
458 }
459 return pte;
460}
461
462#define __HAVE_ARCH_PTEP_SET_WRPROTECT
Joe Perches3cbaeaf2008-03-23 01:03:12 -0700463static inline void ptep_set_wrprotect(struct mm_struct *mm,
464 unsigned long addr, pte_t *ptep)
Jeremy Fitzhardinge195466d2008-01-30 13:32:58 +0100465{
Jeremy Fitzhardinged8d89822008-01-30 13:32:58 +0100466 clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
Jeremy Fitzhardinge195466d2008-01-30 13:32:58 +0100467 pte_update(mm, addr, ptep);
468}
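
/*
 * Added note (illustrative, not from the original header): unlike
 * pte_wrprotect(), which only transforms a value, ptep_set_wrprotect()
 * clears _PAGE_RW atomically in the live page table and fires the
 * pte_update() hook -- the pattern fork() relies on when write-protecting
 * a parent's pages for copy-on-write, e.g.:
 *
 *	ptep_set_wrprotect(src_mm, addr, src_pte);
 */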
469
Jeremy Fitzhardinge85958b42008-03-17 16:37:14 -0700470/*
471 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
472 *
473 * dst - pointer to pgd range anwhere on a pgd page
474 * src - ""
475 * count - the number of pgds to copy.
476 *
477 * dst and src can be on the same page, but the range must not overlap,
478 * and must not cross a page boundary.
479 */
480static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
481{
482 memcpy(dst, src, count * sizeof(pgd_t));
483}
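
/*
 * Added usage sketch (not part of the original header): a typical caller
 * copies the kernel half of a reference pgd into a freshly allocated one,
 * using the KERNEL_PGD_BOUNDARY/KERNEL_PGD_PTRS constants defined above,
 * roughly:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */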


#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */