#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(mm, address, pmdp);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTE's which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif
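
/*
 * Illustrative sketch, not part of this header: an architecture could
 * override the hook above so that a full address-space teardown uses a
 * plain store, since no user of the mm can observe the entry any more.
 * The override below is hypothetical and assumes the usual __pte()
 * cast macro:
 *
 *	#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
 *	static inline void pte_clear_not_present_full(struct mm_struct *mm,
 *						      unsigned long address,
 *						      pte_t *ptep, int full)
 *	{
 *		if (full)
 *			*ptep = __pte(0);
 *		else
 *			pte_clear(mm, address, ptep);
 *	}
 */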

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
				  unsigned long address,
				  pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page, mapped)	do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
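
/*
 * Worked example (illustrative, assuming 2MB PMDs, i.e. PMD_SIZE ==
 * 0x200000): pmd_addr_end(0x1ff000, 0x400000) rounds 0x1ff000 up to the
 * next PMD boundary and returns 0x200000, which is still below the range
 * end; pmd_addr_end(0x3ff000, 0x3ff800) rounds up to 0x400000, which is
 * past the range end, so the end 0x3ff800 is returned instead.  The "- 1"
 * in the comparison keeps the result correct even when the rounded-up
 * boundary wraps to 0 at the top of the address space.
 */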

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
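
/*
 * Typical use, a minimal sketch of the walk pattern used throughout mm/
 * (visit_ptes() is a hypothetical per-range callback, not a kernel
 * function): each level clamps with p?d_addr_end() and skips empty or
 * corrupt entries with p?d_none_or_clear_bad().
 *
 *	static void walk_pmds(pud_t *pud, unsigned long addr,
 *			      unsigned long end)
 *	{
 *		pmd_t *pmd = pmd_offset(pud, addr);
 *		unsigned long next;
 *
 *		do {
 *			next = pmd_addr_end(addr, end);
 *			if (pmd_none_or_clear_bad(pmd))
 *				continue;
 *			visit_ptes(pmd, addr, next);
 *		} while (pmd++, addr = next, addr != end);
 *	}
 */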

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
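
/*
 * Typical use, a minimal sketch modelled on the pte loop in
 * mm/mprotect.c (pte lock held by the caller, newprot is the target
 * protection):
 *
 *	ptent = ptep_modify_prot_start(mm, addr, pte);
 *	ptent = pte_modify(ptent, newprot);
 *	ptep_modify_prot_commit(mm, addr, pte, ptent);
 *
 * Because the entry is non-present between start and commit, the
 * hardware cannot set dirty/accessed bits mid-transaction; any bits set
 * beforehand are carried in the value returned by
 * ptep_modify_prot_start().
 */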
#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified.  In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
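
/*
 * Typical use, a minimal sketch honouring the rules above (page table
 * lock held, and no pte is read back after being written inside the
 * lazy section):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; pte++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, pte, pte_mkold(*pte));
 *	arch_leave_lazy_mmu_mode();
 *
 * A batching hypervisor may coalesce all of the set_pte_at() calls into
 * a single update issued at arch_leave_lazy_mmu_mode().
 */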

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif
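
/*
 * For orientation, a rough sketch of the caller: the scheduler's
 * context_switch() in kernel/sched.c issues the start hook before the
 * mm and register state are switched, roughly:
 *
 *	arch_start_context_switch(prev);
 *	switch_mm(oldmm, mm, next);
 *	switch_to(prev, next, prev);
 *
 * The matching "end" is issued from architecture-specific code once the
 * new task's state is in place.
 */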

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 */
static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
					unsigned long pfn, unsigned long size)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * Interface that can be used by architecture code to keep track of
 * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
 *
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
static inline void untrack_pfn_vma(struct vm_area_struct *vma,
					unsigned long pfn, unsigned long size)
{
}
#else
extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
				unsigned long pfn, unsigned long size);
extern int track_pfn_vma_copy(struct vm_area_struct *vma);
extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
				unsigned long size);
#endif
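
/*
 * Call pattern over the life of one pfnmap vma, as described in the
 * comments above (a sketch of when each hook fires, not literal kernel
 * code):
 *
 *	remap_pfn_range()/vm_insert_pfn():  track_pfn_vma_new(vma, &prot, pfn, size)
 *	fork, via copy_page_range():        track_pfn_vma_copy(vma)
 *	unmap of the region:                untrack_pfn_vma(vma, pfn, size)
 *
 * Without __HAVE_PFNMAP_TRACKING all three are no-ops, so generic code
 * can call them unconditionally.
 */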

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return 0;
}
#ifndef __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */