#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update a PTE in place, setting only the access flags (dirty,
 * accessed, and writable).  Furthermore, we know the new value is
 * always "more permissive" than the old one, which allows most
 * architectures to optimize this.  We return whether the PTE actually
 * changed, which in turn instructs the caller to do things like
 * update_mmu_cache().  This used to be done in the caller, but sparc
 * needs minor faults to force that call on sun4c, so we changed this
 * macro slightly.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
	int __changed = !pte_same(*(__ptep), __entry); \
	if (__changed) { \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address); \
	} \
	__changed; \
})
#endif

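/*
 * Illustrative caller pattern (a hedged sketch, not part of this
 * interface): a fault handler that upgrades access flags might do
 * roughly the following, where vma, address, ptep and write_access
 * are assumed locals:
 *
 *	pte_t entry = pte_mkyoung(*ptep);
 *	if (write_access)
 *		entry = pte_mkdirty(pte_mkwrite(entry));
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write_access))
 *		update_mmu_cache(vma, address, entry);
 */
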
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep) \
({ \
	pte_t __pte = *(__ptep); \
	int r = 1; \
	if (!pte_young(__pte)) \
		r = 0; \
	else \
		set_pte_at((__vma)->vm_mm, (__address), \
			   (__ptep), pte_mkold(__pte)); \
	r; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep) \
({ \
	int __young; \
	__young = ptep_test_and_clear_young(__vma, __address, __ptep); \
	if (__young) \
		flush_tlb_page(__vma, __address); \
	__young; \
})
#endif

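/*
 * Illustrative use (sketch, assuming an rmap-style walker with vma,
 * address and ptep in hand): page aging tests and clears the young
 * bit, flushing the TLB so the next access must set it again:
 *
 *	if (ptep_clear_flush_young(vma, address, ptep))
 *		referenced++;
 */
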
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep) \
({ \
	pte_t __pte = *(__ptep); \
	pte_clear((__mm), (__address), (__ptep)); \
	__pte; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \
({ \
	pte_t __pte; \
	__pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \
	__pte; \
})
#endif

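/*
 * The __full hint is non-zero when the whole address space is being
 * torn down.  A hypothetical architecture override (illustrative
 * only, not taken from any real port) could use it to skip per-PTE
 * synchronization and rely on the final teardown flush:
 *
 *	#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \
 *	({ \
 *		pte_t __pte = *(__ptep); \
 *		if (__full) \
 *			pte_clear((__mm), (__address), (__ptep)); \
 *		else \
 *			__pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \
 *		__pte; \
 *	})
 */
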
/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or during the destruction of an address space.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full) \
do { \
	pte_clear((__mm), (__address), (__ptep)); \
} while (0)
#endif

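/*
 * Illustrative caller pattern (hedged sketch): an unmap loop can use
 * this cheaper helper for swap/file PTEs, which are already not
 * present, with tlb->fullmm saying whether the whole mm is going away:
 *
 *	if (!pte_present(ptent)) {
 *		... handle the swap entry ...
 *		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 *	}
 */
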
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep) \
({ \
	pte_t __pte; \
	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
	flush_tlb_page(__vma, __address); \
	__pte; \
})
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

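/*
 * Example (sketch of the usual caller): fork() write-protects PTEs in
 * the parent so that the first write from either process faults and
 * triggers copy-on-write; PTE-copying code does roughly:
 *
 *	if (is_cow_mapping(vm_flags)) {
 *		ptep_set_wrprotect(src_mm, addr, src_pte);
 *		pte = pte_wrprotect(pte);
 *	}
 */
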
#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page)		do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
#define lazy_mmu_prot_update(pte)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection
 * of the page table locks for all page tables which may be modified.  In
 * the UP case, this is required so that preemption is disabled, and in the
 * SMP case, it must synchronize the delayed page table writes properly on
 * other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif

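/*
 * Typical batching pattern (illustrative sketch, modeled on a PTE
 * mover): bracket a run of page table updates so a hypervisor can
 * queue them and apply the whole batch when the mode is left:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
 *				   new_pte++, new_addr += PAGE_SIZE) {
 *		pte = ptep_clear_flush(vma, old_addr, old_pte);
 *		set_pte_at(mm, new_addr, new_pte, pte);
 *	}
 *	arch_leave_lazy_mmu_mode();
 */
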
/*
 * A facility to provide batching of the reload of page tables with the
 * actual context switch code for paravirtualized guests.  By convention,
 * only one of the lazy modes (CPU, MMU) should be active at any given
 * time, entry should never be nested, and entries and exits should always
 * be paired.  This is for the sanity of maintaining and reasoning about
 * the kernel code.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
#define arch_enter_lazy_cpu_mode()	do {} while (0)
#define arch_leave_lazy_cpu_mode()	do {} while (0)
#define arch_flush_lazy_cpu_mode()	do {} while (0)
#endif

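/*
 * Pairing sketch (an assumption, not a real call site): a
 * paravirtualized context switch can defer several per-task state
 * reloads into one batched hypercall, e.g.:
 *
 *	arch_enter_lazy_cpu_mode();
 *	load_TLS(next, cpu);
 *	... other per-task state loads ...
 *	arch_leave_lazy_cpu_mode();	(queued updates issued here)
 */
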
/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, the rounded-up __boundary may wrap to 0 in any
 * of the macros below.
 */

#define pgd_addr_end(addr, end) \
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
	(__boundary - 1 < (end) - 1)? __boundary: (end); \
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end) \
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
	(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end) \
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
	(__boundary - 1 < (end) - 1)? __boundary: (end); \
})
#endif

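/*
 * Worked example (illustrative, assuming 2MB PMDs): with
 * addr = 0x1ff000 and end = 0x400000, pmd_addr_end() computes
 * __boundary = (0x1ff000 + 0x200000) & ~0x1fffff = 0x200000 and
 * returns it, so the caller covers [0x1ff000, 0x200000) before moving
 * to the next pmd.  The "- 1" comparison keeps the result correct
 * even when the rounded-up __boundary wraps to 0 at the very top of
 * the address space.
 */
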
/*
 * When walking page tables, we usually want to skip any p?d_none
 * entries, and any p?d_bad entries - reporting the error before
 * resetting the entry to none.  Do the tests inline, but report and
 * clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
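
/*
 * Illustrative walk (sketch of the usual caller pattern in mm/, with
 * walk_pte_range standing in for whatever per-PTE work the caller
 * does):
 *
 *	pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		walk_pte_range(pmd, addr, next);
 *	} while (pmd++, addr = next, addr != end);
 */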
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */