#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__

#ifndef __HAVE_ARCH_PTEP_ESTABLISH
/*
 * Establish a new mapping:
 * - flush the old one
 * - update the page tables
 * - inform the TLB about the new one
 *
 * We hold the mm semaphore for reading, and the pte lock.
 *
 * Note: the old pte is known not to be writable, so we don't need to
 * worry about dirty bits etc. getting lost.
 */
#define ptep_establish(__vma, __address, __ptep, __entry)		\
do {									\
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
	flush_tlb_page(__vma, __address);				\
} while (0)
#endif
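
/*
 * Usage sketch (illustrative, not part of the interface): a fault
 * handler replacing an existing mapping would call this with the pte
 * lock held; "vma", "address", "ptep" and "new_page" are hypothetical
 * locals assumed to be set up by the caller:
 *
 *	pte_t entry = mk_pte(new_page, vma->vm_page_prot);
 *	ptep_establish(vma, address, ptep, entry);
 *
 * The new pte is written before the old TLB entry is flushed, which is
 * safe only because the old pte is known not to be writable.
 */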

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Largely the same as above, but only sets the access flags (dirty,
 * accessed, and writable). Furthermore, we know it always gets set
 * to a "more permissive" setting, which allows most architectures
 * to optimize this.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
do {									  \
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	  \
	flush_tlb_page(__vma, __address);				  \
} while (0)
#endif
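
/*
 * Usage sketch (illustrative): granting write access after a write
 * fault on a present page, assuming "vma", "address" and "ptep" were
 * looked up by the caller and the pte lock is held:
 *
 *	pte_t entry = pte_mkdirty(pte_mkwrite(*ptep));
 *	ptep_set_access_flags(vma, address, ptep, entry, 1);
 *
 * Since the new setting is strictly more permissive, an architecture
 * may implement this without any TLB flush, at worst taking one more
 * harmless fault through a stale cached translation.
 */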

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *(__ptep);					\
	int r = 1;							\
	if (!pte_young(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address),			\
			   (__ptep), pte_mkold(__pte));			\
	r;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young(__vma, __address, __ptep);	\
	if (__young)							\
		flush_tlb_page(__vma, __address);			\
	__young;							\
})
#endif
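
/*
 * Usage sketch (illustrative): reclaim's page aging clears the
 * accessed bit to learn whether a mapping was referenced since the
 * previous scan:
 *
 *	if (ptep_clear_flush_young(vma, address, ptep))
 *		referenced++;
 *
 * The TLB flush is skipped when the bit was already clear, because in
 * that case the pte was not modified and there is no stale translation
 * state to remove.
 */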

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *(__ptep);					\
	int r = 1;							\
	if (!pte_dirty(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address), (__ptep),	\
			   pte_mkclean(__pte));				\
	r;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty;							\
	__dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep);	\
	if (__dirty)							\
		flush_tlb_page(__vma, __address);			\
	__dirty;							\
})
#endif
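
/*
 * Usage sketch (illustrative): writeback-style code transfers the pte
 * dirty bit to the struct page before cleaning the pte:
 *
 *	if (ptep_clear_flush_dirty(vma, address, ptep))
 *		set_page_dirty(page);
 *
 * Here the flush is essential: without it a CPU could keep writing
 * through a cached translation whose dirty bit has just been cleared,
 * and those writes would go unrecorded.
 */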

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	pte_clear((__mm), (__address), (__ptep));			\
	__pte;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full)	\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__mm), (__address), (__ptep));	\
	__pte;								\
})
#endif
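
/*
 * The "__full" argument is a hint: it is non-zero when the whole
 * address space is being torn down (for example on process exit), so
 * an architecture override may skip per-pte synchronization that only
 * matters for a still-live mm. This generic fallback ignores the hint
 * and simply forwards to ptep_get_and_clear().
 */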

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifying PTEs which are already not present, or
 * while an address space is being destroyed.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full)	\
do {									\
	pte_clear((__mm), (__address), (__ptep));			\
} while (0)
#endif
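
/*
 * Usage sketch (illustrative): unmap code clearing a swap or file pte,
 * where no TLB entry can exist because the pte was never marked
 * present; "ptent", "addr" and "tlb" are hypothetical caller state:
 *
 *	if (!pte_present(ptent))
 *		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 */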

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep)			\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep);	\
	flush_tlb_page(__vma, __address);				\
	__pte;								\
})
#endif
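
/*
 * Usage sketch (illustrative): unmapping code atomically retires a
 * mapping and preserves its dirty state, in the style of
 * try_to_unmap_one():
 *
 *	pte_t pteval = ptep_clear_flush(vma, address, ptep);
 *	if (pte_dirty(pteval))
 *		set_page_dirty(page);
 *
 * Returning the old pte lets the caller act on dirty/accessed bits
 * after the translation is already gone from the TLB.
 */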

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
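
/*
 * Usage sketch (illustrative): fork() write-protects shared pages in
 * the parent so that the first write after fork takes a COW fault,
 * roughly as in copy_one_pte():
 *
 *	if (is_cow_mapping(vm_flags))
 *		ptep_set_wrprotect(src_mm, addr, src_pte);
 *
 * Note there is no TLB flush here; callers are expected to batch the
 * flush for the whole copied range.
 */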

#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page)		do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
#define lazy_mmu_prot_update(pte)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection
 * of the page table locks for all page tables which may be modified.  In
 * the UP case, this is required so that preemption is disabled, and in the
 * SMP case, it must synchronize the delayed page table writes properly on
 * other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
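
/*
 * Usage sketch (illustrative): a pte-range updater batching its writes
 * under the page table lock, in the style of remap_pte_range():
 *
 *	arch_enter_lazy_mmu_mode();
 *	do {
 *		set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
 *		pfn++;
 *	} while (pte++, addr += PAGE_SIZE, addr != end);
 *	arch_leave_lazy_mmu_mode();
 *
 * Per the read-hazard rule above, nothing in the loop may read a pte
 * back through a raw pointer after modifying it.
 */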

/*
 * A facility to provide batching of the reload of page tables with the
 * actual context switch code for paravirtualized guests.  By convention,
 * only one of the lazy modes (CPU, MMU) should be active at any given
 * time, entry should never be nested, and entry and exit should always
 * be paired.  This keeps the kernel code easier to maintain and reason
 * about.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
#define arch_enter_lazy_cpu_mode()	do {} while (0)
#define arch_leave_lazy_cpu_mode()	do {} while (0)
#define arch_flush_lazy_cpu_mode()	do {} while (0)
#endif
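
/*
 * Usage sketch (illustrative): a paravirtualized context switch wraps
 * its state reloads so the hypervisor can apply them as one batch; the
 * calls in the body are x86-flavoured stand-ins, not requirements:
 *
 *	arch_enter_lazy_cpu_mode();
 *	load_cr3(next->pgd);
 *	load_TLS(next, cpu);
 *	arch_leave_lazy_cpu_mode();
 *
 * Only the pairing and non-nesting of enter/leave is mandated by the
 * convention described above.
 */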

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, the rounded-up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
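
/*
 * Usage sketch (illustrative): the walker pattern these macros exist
 * for, shown at the pmd level ("walk_pte_range" is a hypothetical
 * helper):
 *
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		walk_pte_range(pmd, addr, next);
 *	} while (pmd++, addr = next, addr != end);
 *
 * The "- 1" in the comparison keeps the macros correct even when the
 * rounded-up __boundary wraps past the top of the address space to 0.
 */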

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
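
/*
 * The out-of-line halves live in mm/memory.c; each reports the bogus
 * value and resets the entry to none, along these lines (sketch):
 *
 *	void pmd_clear_bad(pmd_t *pmd)
 *	{
 *		pmd_ERROR(*pmd);
 *		pmd_clear(pmd);
 *	}
 */
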
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */