#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

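/*
 * Enable or disable user-space RDPMC for this mm: set CR4.PCE when
 * rdpmc is always allowed or this mm has opted in via
 * perf_rdpmc_allowed, and clear it otherwise.  Invoked when an mm is
 * loaded onto the CPU.
 */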
static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	unsigned int nr_entries;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

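/*
 * Install this mm's LDT on the current CPU, or clear the LDT register
 * if the mm has none.  Must run with preemption disabled (see the
 * DEBUG_LOCKS_WARN_ON below) so the loaded LDT cannot go stale under us.
 */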
static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->nr_entries);
	else
		clear_LDT();
#else
	clear_LDT();
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

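/*
 * Called when switching to a kernel thread: mark this CPU's TLB state
 * lazy.  The old mm's page tables stay loaded; a later remote flush can
 * see the lazy state and make this CPU drop the mm rather than
 * servicing every flush.
 */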
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
}

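/*
 * Arch-specific setup for a fresh mm: default protection-key state
 * (when OSPKE is available) and the per-mm LDT context.
 */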
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and always allocated */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	return init_new_context_ldt(tsk, mm);
}

static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

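/*
 * Switch the CPU to a new address space.  switch_mm_irqs_off() is the
 * variant callers may use when interrupts are already disabled.
 */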
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

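/*
 * Activate a new mm on the current task (e.g. at exec); the paravirt
 * hook gives hypervisor guests such as Xen a chance to act before the
 * switch.
 */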
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

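/*
 * Forward the fork-time mmap duplication and the exit-time teardown
 * to the paravirt layer so hypervisor guests can track the mm.
 */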
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

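/*
 * An mm is 64-bit if the kernel is 64-bit and the process is not
 * running in IA-32 compatibility mode.
 */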
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
	       mm->context.ia32_compat != TIF_IA32;
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

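/*
 * Called while setting up a new executable's mm: initialize the per-mm
 * MPX bounds-table state for the fresh address space.
 */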
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

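/*
 * Extract a VMA's 4-bit protection key from the VM_PKEY_BIT* flags.
 */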
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
				      VM_PKEY_BIT2 | VM_PKEY_BIT3;

	return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}
#endif

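/*
 * Check the current PKRU value: a pkey grants access only if reading
 * is allowed and, for a write, writing is allowed as well.
 */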
static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || !in_atomic());

	VM_BUG_ON(cr3 != read_cr3());
	return cr3;
}

#endif /* _ASM_X86_MMU_CONTEXT_H */