#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#include <linux/pfn.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>

#if 4 - defined(__PAGETABLE_PUD_FOLDED) - defined(__PAGETABLE_PMD_FOLDED) != \
	CONFIG_PGTABLE_LEVELS
#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{PUD,PMD}_FOLDED
#endif

/*
 * On almost all architectures and configurations, 0 can be used as the
 * upper ceiling to free_pgtables(): on many architectures it has the same
 * effect as using TASK_SIZE.  However, there is one configuration which
 * must impose a more careful limit, to avoid freeing kernel pgtables.
 */
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING	0UL
#endif

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
#else
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

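/*
 * Generic fallback: test and clear the "accessed" (young) bit of a PTE.
 * Reclaim samples this bit to estimate how recently a page was used.  Note
 * that this variant does not flush the TLB; callers that need the flush use
 * ptep_clear_flush_young() below.
 */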
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = *ptep;
	int r = 1;
	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;
	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#else
/*
 * Despite being relevant only to THP, this API is called from generic rmap
 * code under PageTransHuge(), so it needs a dummy implementation for !THP.
 */
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, address, ptep);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	pmd_clear(pmdp);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp,
					    int full)
{
	return pmdp_huge_get_and_clear(mm, address, pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	return pte;
}
#endif

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
				   unsigned long address,
				   pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;
	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#else
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	BUILD_BUG();
	return *pmdp;
}
#define pmdp_collapse_flush pmdp_collapse_flush
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
					   unsigned long address, pmd_t *pmdp)
{

}
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PTE_UNUSED
/*
 * Some architectures provide facilities to virtualization guests
 * so that they can flag allocated pages as unused. This allows the
 * host to transparently reclaim unused pages. This function returns
 * whether the pte's page is unused.
 */
static inline int pte_unused(pte_t pte)
{
	return 0;
}
#endif

#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)	(pte)
#endif

#ifndef pte_accessible
# define pte_accessible(mm, pte)	((void)(pte), 1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

#ifndef pgprot_writethrough
#define pgprot_writethrough pgprot_noncached
#endif

#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif

#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
		newprot = pgprot_noncached(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
		newprot = pgprot_writecombine(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
		newprot = pgprot_device(newprot);
	return newprot;
}
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier.  Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
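
/*
 * For illustration: a typical page table walker combines the p?d_addr_end()
 * helpers above with the p?d_none_or_clear_bad() checks, roughly like this
 * (a sketch, not a helper defined by this header):
 *
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		... process PTEs in [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);
 */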

static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time.  The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(mm, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified.
 */
static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
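
/*
 * Illustrative use of the transaction API above (a sketch; callers such as
 * mprotect's change_pte_range() follow this shape, with the pte lock held
 * across the whole sequence):
 *
 *	ptent = ptep_modify_prot_start(mm, addr, pte);
 *	ptent = pte_modify(ptent, newprot);
 *	ptep_modify_prot_commit(mm, addr, pte, ptent);
 */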
#endif /* CONFIG_MMU */

/*
 * A facility to provide lazy MMU batching.  This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued.  Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window.  Note that using this
 * interface requires that read hazards be removed from the code.  A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.  This mode can only be entered and left under the protection of
 * the page table locks for all page tables which may be modified.  In the UP
 * case, this is required so that preemption is disabled, and in the SMP case,
 * it must synchronize the delayed page table writes properly on other CPUs.
 */
#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_flush_lazy_mmu_mode()	do {} while (0)
#endif
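
/*
 * Typical batching pattern (a sketch; the generic no-op definitions above
 * make it free on architectures that do not implement lazy MMU mode):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (...) {
 *		... set_pte_at()/ptep_get_and_clear() on pte-locked entries ...
 *	}
 *	arch_leave_lazy_mmu_mode();
 */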

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests.  By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entries and exits should always be
 * paired.  This is for sanity of maintaining and reasoning about the
 * kernel code.  In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

#ifndef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte;
}
#endif

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of
 * the memory type of pfn mappings specified by remap_pfn_range() and
 * vm_insert_pfn().
 */

/*
 * track_pfn_remap is called when a _new_ pfn mapping is being established
 * by remap_pfn_range() for the physical range indicated by pfn and size.
 */
static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
				  unsigned long pfn, unsigned long addr,
				  unsigned long size)
{
	return 0;
}

/*
 * track_pfn_insert is called when a _new_ single pfn is established
 * by vm_insert_pfn().
 */
static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
				   pfn_t pfn)
{
	return 0;
}

/*
 * track_pfn_copy is called when a vma that is covering the pfnmap gets
 * copied through copy_page_range().
 */
static inline int track_pfn_copy(struct vm_area_struct *vma)
{
	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn and size are zero).
 */
static inline void untrack_pfn(struct vm_area_struct *vma,
			       unsigned long pfn, unsigned long size)
{
}

/*
 * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
 */
static inline void untrack_pfn_moved(struct vm_area_struct *vma)
{
}
#else
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
			   unsigned long pfn, unsigned long addr,
			   unsigned long size);
extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
			    pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size);
extern void untrack_pfn_moved(struct vm_area_struct *vma);
#endif
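
/*
 * Note (informational): on architectures that define __HAVE_PFNMAP_TRACKING
 * (e.g. x86 with PAT), the extern variants above are implemented by the
 * architecture to record and validate the memory type of the mapping; the
 * generic stubs simply report success and track nothing.
 */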

#ifdef __HAVE_COLOR_ZERO_PAGE
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#else
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	return pfn == zero_pfn;
}

static inline unsigned long my_zero_pfn(unsigned long addr)
{
	extern unsigned long zero_pfn;
	return zero_pfn;
}
#endif

#ifdef CONFIG_MMU

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return 0;
}
#ifndef __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	BUG();
	return 0;
}
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	/*
	 * Depend on compiler for an atomic pmd read. NOTE: this is
	 * only going to work, if the pmdval_t isn't larger than
	 * an unsigned long.
	 */
	return *pmdp;
}
#endif

#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl)
{
	/*
	 * With split pmd lock we also need to move preallocated
	 * PTE page table if new_pmd is on different PMD page table.
	 */
	return new_pmd_ptl != old_pmd_ptl;
}
#endif

/*
 * This function is meant to be used by sites walking pagetables with
 * the mmap_sem held in read mode to protect against MADV_DONTNEED and
 * transhuge page faults.  MADV_DONTNEED can convert a transhuge pmd
 * into a null pmd and the transhuge page fault can convert a null pmd
 * into a huge pmd or into a regular pmd (if the hugepage allocation
 * fails).  While holding the mmap_sem in read mode the pmd becomes
 * stable and stops changing under us only if it's not null and not a
 * transhuge pmd.  When those races occur and this function makes a
 * difference vs the standard pmd_none_or_clear_bad, the result is
 * undefined so behaving as if the pmd were none is safe (because it
 * can return none anyway).  The compiler level barrier() is critically
 * important to compute the two checks atomically on the same pmdval.
 *
 * For 32bit kernels with a 64bit large pmd_t this automatically takes
 * care of reading the pmd atomically to avoid SMP race conditions
 * against pmd_populate() when the mmap_sem is held for reading by the
 * caller (a special atomic read, not done by "gcc" as in the generic
 * version above, is also needed when THP is disabled because the page
 * fault can populate the pmd from under us).
 */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);
	/*
	 * The barrier will stabilize the pmdval in a register or on
	 * the stack so that it will stop changing under the code.
	 *
	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
	 * pmd_read_atomic is allowed to return a non-atomic pmdval
	 * (for example pointing to a hugepage that has never been
	 * mapped in the pmd).  The below checks will only care about
	 * the low part of the pmd with 32bit PAE x86 anyway, with the
	 * exception of pmd_none().  So the important thing is that if
	 * the low part of the pmd is found null, the high part will
	 * be also null or the pmd_none() check below would be
	 * confused.
	 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

/*
 * This is a noop if Transparent Hugepage Support is not built into
 * the kernel.  Otherwise it is equivalent to
 * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
 * places that already verified the pmd is not none and they want to
 * walk ptes while holding the mmap sem in read mode (write mode doesn't
 * need this).  If THP is not enabled, the pmd can't go away under the
 * code even if MADV_DONTNEED runs, but if THP is enabled we need to
 * run a pmd_trans_unstable before walking the ptes after
 * split_huge_page_pmd returns (because it may have run when the pmd
 * became null, but then a page fault can map in a THP and not a
 * regular page).
 */
static inline int pmd_trans_unstable(pmd_t *pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	return pmd_none_or_trans_huge_or_clear_bad(pmd);
#else
	return 0;
#endif
}
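
/*
 * Illustrative pattern for pte walkers that hold mmap_sem for reading
 * (a sketch of how callers such as mincore or pagemap use the check above):
 *
 *	if (pmd_trans_unstable(pmd))
 *		return 0;	-- treat like pmd_none(): skip or retry
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	... walk ptes ...
 */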

#ifndef CONFIG_NUMA_BALANCING
/*
 * Technically a PTE can be PROTNONE even when not doing NUMA balancing, but
 * the only case the kernel cares about is NUMA balancing, and the bit is only
 * ever set when the VMA is accessible.  For PROT_NONE VMAs, the PTEs are not
 * marked _PAGE_PROTNONE, so by default implement the helper as "always no".
 * It is the responsibility of the caller to distinguish between PROT_NONE
 * protections and NUMA hinting fault protections.
 */
static inline int pte_protnone(pte_t pte)
{
	return 0;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_MMU */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}
static inline int pud_clear_huge(pud_t *pud)
{
	return 0;
}
static inline int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * ARCHes with special requirements for evicting THP backing TLB entries can
 * implement this.  Even otherwise, it can help optimize the normal TLB flush
 * in the THP regime: the stock flush_tlb_range() typically has an optimization
 * to nuke the entire TLB if the flush span exceeds a threshold, which will
 * likely be true for a single huge page.  Thus a single THP flush would
 * invalidate the entire TLB, which is not desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range
 */
#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#else
#define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
#endif
#endif

#endif /* !__ASSEMBLY__ */

#ifndef io_remap_pfn_range
#define io_remap_pfn_range remap_pfn_range
#endif

#ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
#else
#define has_transparent_hugepage() 0
#endif
#endif

#endif /* _ASM_GENERIC_PGTABLE_H */