#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Sets only the access flags (dirty, accessed, and writable).
 * Furthermore, we know it always gets set to a "more permissive"
 * setting, which allows most architectures to optimize this.  We
 * return whether the PTE actually changed, which in turn instructs
 * the caller to do things like update_mmu_cache.  This used to be
 * done in the caller, but sparc needs minor faults to force that
 * call on sun4c, so we changed this macro slightly.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})
#endif

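/*
 * Illustrative sketch only, not part of this header: a page fault
 * handler might use ptep_set_access_flags() roughly as below.  The
 * surrounding variables (vma, address, ptep, write_access) are
 * assumed to come from a hypothetical fault-handling context:
 *
 *	pte_t entry = pte_mkyoung(*ptep);
 *	if (write_access)
 *		entry = pte_mkdirty(pte_mkwrite(entry));
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write_access))
 *		update_mmu_cache(vma, address, entry);
 */
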
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *(__ptep);					\
	int r = 1;							\
	if (!pte_young(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address),			\
			   (__ptep), pte_mkold(__pte));			\
	r;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young(__vma, __address, __ptep);	\
	if (__young)							\
		flush_tlb_page(__vma, __address);			\
	__young;							\
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	pte_clear((__mm), (__address), (__ptep));			\
	__pte;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define ptep_get_and_clear_full(__mm, __address, __ptep, __full)	\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__mm), (__address), (__ptep));	\
	__pte;								\
})
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or during address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(__mm, __address, __ptep, __full)	\
do {									\
	pte_clear((__mm), (__address), (__ptep));			\
} while (0)
#endif

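/*
 * Illustrative sketch only, not part of this header: a zap/teardown
 * path that finds a non-present PTE (e.g. one holding a swap entry)
 * can use this helper, passing a "full" flag such as tlb->fullmm to
 * say the whole address space is being torn down.  Variable names
 * are assumed context, loosely modeled on mm/memory.c:
 *
 *	if (!pte_present(ptent))
 *		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 */
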
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep)			\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep);	\
	flush_tlb_page(__vma, __address);				\
	__pte;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page)		do { } while (0)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

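/*
 * Worked example of the wrap-safe comparison above (assuming a 32-bit
 * unsigned long and a 4 MB PGDIR_SIZE): for addr = 0xffc00000, the
 * rounded-up __boundary overflows to 0.  A naive "__boundary < end"
 * would then wrongly pick the wrapped 0; subtracting 1 from both
 * sides turns the wrapped 0 into ULONG_MAX, so the comparison
 * correctly selects end instead.
 */
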
/*
 * When walking page tables, we usually want to skip any p?d_none
 * entries and any p?d_bad entries, reporting the error before
 * resetting the entry to none.  Do the tests inline, but report and
 * clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
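
/*
 * Illustrative sketch only, not part of this header: a typical walker
 * combines p?d_addr_end() with p?d_none_or_clear_bad() to visit each
 * populated range.  Names (walk_pte_range in particular) are assumed
 * context, loosely modeled on the walkers in mm/memory.c:
 *
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		walk_pte_range(pmd, addr, next);
 *	} while (pmd++, addr = next, addr != end);
 */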
#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection
 * of the page table locks for all page tables which may be modified.  In
 * the UP case, this is required so that preemption is disabled, and in the
 * SMP case, it must synchronize the delayed page table writes properly on
 * other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif

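/*
 * Illustrative sketch only, not part of this header: a loop that
 * updates many PTEs under the page table lock can bracket the work
 * with the lazy MMU hooks so a hypervisor may batch the updates.
 * Variable names are assumed context:
 *
 *	spin_lock(&mm->page_table_lock);
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; ptep++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
 *	arch_leave_lazy_mmu_mode();
 *	spin_unlock(&mm->page_table_lock);
 */
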
/*
 * A facility to provide batching of the reload of page tables with the
 * actual context switch code for paravirtualized guests.  By convention,
 * only one of the lazy modes (CPU, MMU) should be active at any given
 * time, entry should never be nested, and entry and exit should always
 * be paired.  This is for the sanity of maintaining and reasoning about
 * the kernel code.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
#define arch_enter_lazy_cpu_mode()	do {} while (0)
#define arch_leave_lazy_cpu_mode()	do {} while (0)
#define arch_flush_lazy_cpu_mode()	do {} while (0)
#endif

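/*
 * Illustrative sketch only, not part of this header: a context switch
 * path can bracket the state reloads it wants batched, keeping entry
 * and exit strictly paired as the convention above requires.  Names
 * are assumed context, loosely modeled on an x86 __switch_to():
 *
 *	arch_enter_lazy_cpu_mode();
 *	load_TLS(next, cpu);
 *	arch_leave_lazy_cpu_mode();
 */
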
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */