/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
	       __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))

/*
 * Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte.  In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it.  -ben
 */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

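/*
 * Illustrative sketch only (not part of this header's API): applying the
 * rule above to a pte that may be live.  "mm", "addr" and "ptep" stand
 * for whatever the caller already holds; pte_mkdirty()/pte_mkwrite() are
 * just example modifications built with the generic pte helpers.
 *
 *	pte_t old;
 *
 *	old = ptep_get_and_clear(mm, addr, ptep);
 *	set_pte(ptep, pte_mkwrite(pte_mkdirty(old)));
 *
 * The intermediate clear guarantees the hardware is no longer updating
 * the entry when set_pte() rewrites it.  native_set_pte() itself writes
 * pte_high before pte_low, so a lockless reader never pairs a present
 * low half with a stale high half (see gup_get_pte() below).
 */
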
#define pmd_read_atomic pmd_read_atomic
/*
 * pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
 * a plain "*pmdp" dereference generated by gcc.  The problem is that in
 * certain places where pte_offset_map_lock() is called, concurrent page
 * faults are allowed if the mmap_sem is held for reading.  An example is
 * mincore vs page faults vs MADV_DONTNEED.  On the page fault side
 * pmd_populate() rightfully does a set_64bit(), but if we're reading the
 * pmd_t with a plain "*pmdp" on the mincore side, an SMP race can happen
 * because gcc will not read the 64 bits of the pmd atomically.  To fix
 * this, all places running pte_offset_map_lock() while holding the
 * mmap_sem in read mode shall read the pmdp pointer using this
 * function to know if the pmd is null or not, and in turn to know if
 * they can run pte_offset_map_lock() or pmd_trans_huge() or other pmd
 * operations.
 *
 * Without THP, if the mmap_sem is held for reading, the pmd can only
 * transition from null to not null while pmd_read_atomic() runs.  So
 * we can always return atomic pmd values with this function.
 *
 * With THP, if the mmap_sem is held for reading, the pmd can become
 * trans_huge or none or point to a pte (and in turn become "stable")
 * at any time under pmd_read_atomic().  We could read it truly
 * atomically here with an atomic64_read() for the THP-enabled case (and
 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
 * only return an atomic pmdval if the low part of the pmdval is later
 * found stable (i.e. pointing to a pte).  And we're returning a none
 * pmdval if the low part of the pmd is none.  In some cases the high
 * and low part of the pmdval returned may not be consistent if THP is
 * enabled (the low part may point to a previously mapped hugepage,
 * while the high part may point to a more recently mapped hugepage),
 * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
 * of the pmd to be read atomically to decide if the pmd is unstable
 * or not, with the only exception of when the low part of the pmd is
 * zero, in which case we return a none pmd.
 */
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
	pmdval_t ret;
	u32 *tmp = (u32 *)pmdp;

	ret = (pmdval_t) (*tmp);
	if (ret) {
		/*
		 * Only read the high part once the low part is known to
		 * be non-null; if the low part is null we must not read
		 * the high part or we could end up with a torn pmd.
		 */
		smp_rmb();
		ret |= ((pmdval_t)*(tmp + 1)) << 32;
	}

	return (pmd_t) { ret };
}

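/*
 * Illustrative sketch only: how a lockless reader holding the mmap_sem
 * for read might use pmd_read_atomic() before walking the pte level.
 * pmd_none(), pmd_trans_huge() and pte_offset_map_lock() are the usual
 * helpers; "vma", "pmd", "addr", "ptl" are locals the caller already
 * has, and handle_huge() is a hypothetical placeholder.
 *
 *	pmd_t pmdval = pmd_read_atomic(pmd);
 *
 *	barrier();			(keep gcc from re-reading *pmd)
 *	if (pmd_none(pmdval))
 *		return 0;		(nothing mapped here)
 *	if (pmd_trans_huge(pmdval))
 *		return handle_huge(vma, pmdval, addr);
 *	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 *	...
 *
 * Only the low word of pmdval is guaranteed to be consistent, which is
 * all pmd_none()/pmd_trans_huge() need, per the comment above.
 */
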
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
}

/*
 * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
 * entry, so clear the bottom half first and enforce ordering with a compiler
 * barrier.
 */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}

static inline void native_pud_clear(pud_t *pudp)
{
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 *
	 * Currently all places where pud_clear() is called are either
	 * followed by flush_tlb_mm() or don't need a TLB flush (x86_64
	 * code or pud_clear_bad()), so we don't need a TLB flush here.
	 */
}

#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
#else
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
union split_pmd {
	struct {
		u32 pmd_low;
		u32 pmd_high;
	};
	pmd_t pmd;
};
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
	union split_pmd res, *orig = (union split_pmd *)pmdp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pmd_low = xchg(&orig->pmd_low, 0);
	res.pmd_high = orig->pmd_high;
	orig->pmd_high = 0;

	return res.pmd;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif

#ifdef CONFIG_SMP
union split_pud {
	struct {
		u32 pud_low;
		u32 pud_high;
	};
	pud_t pud;
};

static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
	union split_pud res, *orig = (union split_pud *)pudp;

	/* xchg acts as a barrier before setting of the high bits */
	res.pud_low = xchg(&orig->pud_low, 0);
	res.pud_high = orig->pud_high;
	orig->pud_high = 0;

	return res.pud;
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
#endif

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })

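/*
 * Worked example (illustrative numbers only): with the layout above, a
 * swap entry of type 3 at offset 0x1000 encodes as
 *
 *	__swp_entry(3, 0x1000).val == (3 | (0x1000 << 5)) == 0x20003
 *
 * and decodes back with __swp_type() == (0x20003 & 0x1f) == 3 and
 * __swp_offset() == (0x20003 >> 5) == 0x1000.  The whole value lives in
 * pte_high, so pte_low (and with it the present bit) stays zero for a
 * swap pte.
 */
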
#define gup_get_pte gup_get_pte
/*
 * WARNING: only to be used in the get_user_pages_fast() implementation.
 *
 * With get_user_pages_fast(), we walk down the pagetables without taking
 * any locks.  For this we would like to load the pointers atomically,
 * but that is not possible (without expensive cmpxchg8b) on PAE.  What
 * we do have is the guarantee that a PTE will only either go from not
 * present to present, or present to not present, or both -- it will not
 * switch to a completely different present page without a TLB flush in
 * between; something that we are blocking by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *	ptep->pte_high = h;
 *	smp_wmb();
 *	ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *	ptep->pte_low = 0;
 *	smp_wmb();
 *	ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' iff pte_high
 * sees 'h'.  We load pte_high *after* loading pte_low, which ensures we
 * don't see an older value of pte_high.  *Then* we recheck pte_low,
 * which ensures that we haven't picked up a changed pte high.  We might
 * have gotten rubbish values from pte_low and pte_high, but we are
 * guaranteed that pte_low will not have the present bit set *unless*
 * it is 'l'.  Because get_user_pages_fast() only operates on present ptes
 * we're safe.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}

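/*
 * Illustrative sketch only: the shape of a gup-fast pte walker that may
 * call gup_get_pte().  Interrupts must already be disabled by the
 * get_user_pages_fast() core (that is what blocks the TLB flush
 * mentioned above); pte_present(), pte_page() and get_page() are the
 * usual helpers, and "bail" is a hypothetical error label.
 *
 *	local_irq_save(flags);		(done by the gup-fast core)
 *	...
 *	pte = gup_get_pte(ptep);
 *	if (!pte_present(pte))
 *		goto bail;		(not a present pte, give up)
 *	page = pte_page(pte);
 *	get_page(page);
 *	...
 *	local_irq_restore(flags);
 *
 * Because gup_get_pte() re-checks pte_low, the (pte_low, pte_high) pair
 * obtained above is either fully consistent or not present.
 */
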
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */