/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif  /* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

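/*
 * Set or clear CR4.PCE for the incoming mm: user-space RDPMC is allowed
 * only if perf has marked this mm as rdpmc-capable or rdpmc is
 * force-enabled system-wide via rdpmc_always_available.
 */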
static inline void load_mm_cr4(struct mm_struct *mm)
{
        if (static_key_false(&rdpmc_always_available) ||
            atomic_read(&mm->context.perf_rdpmc_allowed))
                cr4_set_bits(X86_CR4_PCE);
        else
                cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
        /*
         * Xen requires page-aligned LDTs with special permissions.  This is
         * needed to prevent us from installing evil descriptors such as
         * call gates.  On native, we could merge the ldt_struct and LDT
         * allocations, but it's not worth trying to optimize.
         */
        struct desc_struct      *entries;
        unsigned int            nr_entries;

        /*
         * If PTI is in use, then the entries array is not mapped while we're
         * in user mode.  The whole array will be aliased at the address
         * given by ldt_slot_va(slot).  We use two slots so that we can
         * allocate, map, and enable a new LDT without invalidating the
         * mapping of an older, still-in-use LDT.
         *
         * slot will be -1 if this LDT doesn't have an alias mapping.
         */
        int                     slot;
};

/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)

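/*
 * Return the address at which the given LDT slot (0 or 1) is aliased for
 * user-mode access.  Only 64-bit kernels map such an alias; reaching this
 * on 32-bit is a bug.
 */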
static inline void *ldt_slot_va(int slot)
{
#ifdef CONFIG_X86_64
        return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
#else
        BUG();
        return (void *)fix_to_virt(FIX_HOLE);
#endif
}

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
        mm->context.ldt = NULL;
        init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else   /* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
                                  struct mm_struct *mm)
{
        return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

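/*
 * Install @mm's LDT into the current CPU's LDTR, or clear LDTR if @mm has
 * no LDT (or CONFIG_MODIFY_LDT_SYSCALL is off).  With PTI enabled, the LDT
 * is loaded through its user-visible alias rather than through
 * ldt->entries.
 */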
static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        struct ldt_struct *ldt;

        /* READ_ONCE synchronizes with smp_store_release */
        ldt = READ_ONCE(mm->context.ldt);

        /*
         * Any change to mm->context.ldt is followed by an IPI to all
         * CPUs with the mm active.  The LDT will not be freed until
         * after the IPI is handled by all such CPUs.  This means that,
         * if the ldt_struct changes before we return, the values we see
         * will be safe, and the new values will be loaded before we run
         * any user code.
         *
         * NB: don't try to convert this to use RCU without extreme care.
         * We would still need IRQs off, because we don't want to change
         * the local LDT after an IPI loaded a newer value than the one
         * that we can see.
         */

        if (unlikely(ldt)) {
                if (static_cpu_has(X86_FEATURE_PTI)) {
                        if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
                                /*
                                 * Whoops -- either the new LDT isn't mapped
                                 * (if slot == -1) or is mapped into a bogus
                                 * slot (if slot > 1).
                                 */
                                clear_LDT();
                                return;
                        }

                        /*
                         * If page table isolation is enabled, ldt->entries
                         * will not be mapped in the userspace pagetables.
                         * Tell the CPU to access the LDT through the alias
                         * at ldt_slot_va(ldt->slot).
                         */
                        set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
                } else {
                        set_ldt(ldt->entries, ldt->nr_entries);
                }
        } else {
                clear_LDT();
        }
#else
        clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        /*
         * Load the LDT if either the old or new mm had an LDT.
         *
         * An mm will never go from having an LDT to not having an LDT.  Two
         * mms never share an LDT, so we don't gain anything by checking to
         * see whether the LDT changed.  There's also no guarantee that
         * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
         * then prev->context.ldt will also be non-NULL.
         *
         * If we really cared, we could optimize the case where prev == next
         * and we're exiting lazy mode.  Most of the time, if this happens,
         * we don't actually need to reload LDTR, but modify_ldt() is mostly
         * used by legacy code and emulators where we don't need this level
         * of performance.
         *
         * This uses | instead of || because it generates better code.
         */
        if (unlikely((unsigned long)prev->context.ldt |
                     (unsigned long)next->context.ldt))
                load_mm_ldt(next);
#endif

        DEBUG_LOCKS_WARN_ON(preemptible());
}

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

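/*
 * Initialize the arch-specific MMU context of a newly created mm: the
 * context lock, a unique ctx_id, the TLB generation counter, the
 * protection-key bookkeeping (when OSPKE is enabled), and the LDT state.
 */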
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mutex_init(&mm->context.lock);

        mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
        atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
                /* pkey 0 is the default and always allocated */
                mm->context.pkey_allocation_map = 0x1;
                /* -1 means unallocated or invalid */
                mm->context.execute_only_pkey = -1;
        }
#endif
        init_new_context_ldt(mm);
        return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
        destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                               struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

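/*
 * activate_mm() is invoked by the core mm code when a task switches to a
 * brand-new address space (exec): notify the paravirt layer, then switch
 * to the new mm.
 */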
#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif

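/*
 * Fork-time and teardown hooks: copy the paravirt and LDT state into a
 * child mm on fork (propagating any failure from the LDT copy), and tear
 * that state down again when an mm exits.
 */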
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        paravirt_arch_dup_mmap(oldmm, mm);
        return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_arch_exit_mmap(mm);
        ldt_arch_exit_mmap(mm);
}

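/*
 * is_64bit_mm() reports whether @mm belongs to a 64-bit process.  It is
 * always false on 32-bit kernels, and false for ia32 compat tasks when
 * IA32 emulation is built in.
 */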
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return  !IS_ENABLED(CONFIG_IA32_EMULATION) ||
                !(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return false;
}
#endif

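/* Arch hook for setting up a new binary's mm: initialize its MPX state. */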
static inline void arch_bprm_mm_init(struct mm_struct *mm,
                struct vm_area_struct *vma)
{
        mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        /*
         * mpx_notify_unmap() goes and reads a rarely-hot
         * cacheline in the mm_struct.  That can be expensive
         * enough to be seen in profiles.
         *
         * The mpx_notify_unmap() call and its contents have been
         * observed to affect munmap() performance on hardware
         * where MPX is not present.
         *
         * The unlikely() optimizes for the fast case: no MPX
         * in the CPU, or no MPX use in the process.  Even if
         * we get this wrong (in the unlikely event that MPX
         * is widely enabled on some system) the overhead of
         * MPX itself (reading bounds tables) is expected to
         * overwhelm the overhead of getting this unlikely()
         * consistently wrong.
         */
        if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
                mpx_notify_unmap(mm, vma, start, end);
}

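/*
 * vma_pkey() extracts the protection key number encoded in vma->vm_flags
 * (VM_PKEY_BIT0..VM_PKEY_BIT3); without protection-key support it is
 * always key 0.
 */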
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
        unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
                                      VM_PKEY_BIT2 | VM_PKEY_BIT3;

        return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
        return 0;
}
#endif

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
        if (!current->mm)
                return true;
        /*
         * Should PKRU be enforced on the access to this VMA?  If
         * the VMA is from another process, then PKRU has no
         * relevance and should not be enforced.
         */
        if (current->mm != vma->vm_mm)
                return true;

        return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                bool write, bool execute, bool foreign)
{
        /* pkeys never affect instruction fetches */
        if (execute)
                return true;
        /* allow access if the VMA is not one from this process */
        if (foreign || vma_is_foreign(vma))
                return true;
        return __pkru_allows_pkey(vma_pkey(vma), write);
}

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
        unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
                this_cpu_read(cpu_tlbstate.loaded_mm_asid));

        /* For now, be very restrictive about when this can be called. */
        VM_WARN_ON(in_nmi() || preemptible());

        VM_BUG_ON(cr3 != __read_cr3());
        return cr3;
}

#endif /* _ASM_X86_MMU_CONTEXT_H */