#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H

#include <asm-generic/pgtable-nopud.h>

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

#define pud_none(pud)		0
#define pud_bad(pud)		0
#define pud_present(pud)	1

/*
 * Is the pte executable?
 */
static inline int pte_x(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_NX);
}

/*
 * All present user-pages with !NX bit are user-executable:
 */
static inline int pte_exec(pte_t pte)
{
	return pte_user(pte) && pte_x(pte);
}
/*
 * All present pages with !NX bit are kernel-executable:
 */
static inline int pte_exec_kernel(pte_t pte)
{
	return pte_x(pte);
}
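
/*
 * Note: under PAE the NX bit is bit 63 of the 64-bit entry, i.e. the
 * top bit of pte_high, which is why these executability helpers live in
 * the 3-level header.  Roughly:
 *
 *	pte_x(pte) == !((pte).pte_high & 0x80000000)
 */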

#ifndef CONFIG_PARAVIRT
/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
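
/*
 * Illustrative use of the rule above: to modify a PTE that may still be
 * live (present and visible to the hardware walker), fetch and clear it
 * atomically first, then install the new value, e.g.:
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);
 *	... compute the new pte, possibly from 'old' ...
 *	set_pte_at(mm, addr, ptep, new);
 *
 * Writing pte_high before pte_low in set_pte() is safe only because the
 * entry is not present (or otherwise not being walked) while the two
 * halves are momentarily inconsistent.
 */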

/*
 * Since this is only called on user PTEs, and the page fault handler
 * must handle the already racy situation of simultaneous page faults,
 * we are justified in merely clearing the PTE present bit, followed
 * by a set.  The ordering here is important.
 */
static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
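
/*
 * Concretely, the sequence above ensures the hardware never observes a
 * present entry whose two halves belong to different PTEs: the present
 * bit lives in pte_low, so the entry is first made non-present, the new
 * high word is written, and only then does the new low word (with the
 * present bit) become visible.
 */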

#define set_pte_atomic(pteptr,pteval) \
		set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
#define set_pmd(pmdptr,pmdval) \
		set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
#define set_pud(pudptr,pudval) \
		(*(pudptr) = (pudval))
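
/*
 * set_pte_atomic() and set_pmd() go through set_64bit(), which updates
 * the whole 64-bit entry in one atomic operation (a cmpxchg8b loop on
 * i386), so they are safe on entries the hardware may be walking
 * concurrently.
 */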

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}
#endif

/*
 * Pentium-II erratum A13: in PAE mode we explicitly have to flush
 * the TLB via cr3 if the top-level pgd is changed...
 * We do not let the generic code free and clear pgd entries due to
 * this erratum.
 */
static inline void pud_clear (pud_t * pud) { }

#define pud_page(pud) \
((struct page *) __va(pud_val(pud) & PAGE_MASK))

#define pud_page_vaddr(pud) \
((unsigned long) __va(pud_val(pud) & PAGE_MASK))

/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
			pmd_index(address))
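
/*
 * With PAE the 32-bit virtual address is split 2/9/9/12: bits 31-30
 * index the 4-entry pgd (the folded pud), bits 29-21 index the
 * 512-entry pmd table selected above, bits 20-12 index the pte, and
 * bits 11-0 are the offset within the page.
 */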

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
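
/*
 * The xchg fetches and clears the low word in a single atomic step;
 * since the present bit is in the low word, the entry is already
 * non-present by the time the high word is read and cleared, which is
 * why the plain accesses to pte_high are sufficient here.
 */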

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.pte_low && !pte.pte_high;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte.pte_low >> PAGE_SHIFT) |
		(pte.pte_high << (32 - PAGE_SHIFT));
}
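
/*
 * Example with 4K pages (PAGE_SHIFT == 12): for a pte with
 * pte_high == 0x1 and pte_low == 0x23045067, the flags in the low
 * 12 bits are shifted out and pte_pfn() returns
 * (0x23045067 >> 12) | (0x1 << 20) == 0x123045.
 */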

extern unsigned long long __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) |
			(pgprot_val(pgprot) >> 32);
	pte.pte_high &= (__supported_pte_mask >> 32);
	pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) &
			__supported_pte_mask;
	return pte;
}
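
/*
 * pfn_pte() is the inverse of pte_pfn(): the low 20 bits of the pfn
 * land in pte_low (shifted up by PAGE_SHIFT), any higher pfn bits go
 * into pte_high, and both halves are masked with __supported_pte_mask
 * so that bits the CPU does not implement (e.g. NX on non-NX hardware)
 * never reach the page tables.
 */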

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
			pgprot_val(pgprot)) & __supported_pte_mask);
}

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * put the 32 bits of offset into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
#define PTE_FILE_MAX_BITS	32

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })
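
/*
 * A swap entry thus lives entirely in pte_high (pte_low is 0, so the
 * entry is not present): bits 4-0 hold the swap type and bits 31-5 the
 * swap offset.  For example, __swp_entry(3, 0x10) gives val == 0x203,
 * and __swp_type()/__swp_offset() recover 3 and 0x10 from it.
 */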

#define __pmd_free_tlb(tlb, x)		do { } while (0)

#define vmalloc_sync_all() ((void)0)

#endif /* _I386_PGTABLE_3LEVEL_H */