/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif /* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
        if (static_key_false(&rdpmc_always_available) ||
            atomic_read(&mm->context.perf_rdpmc_allowed))
                cr4_set_bits(X86_CR4_PCE);
        else
                cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
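/*
 * Illustrative sketch (an assumption, not part of this header): a
 * context-switch path is expected to call load_mm_cr4() for the
 * incoming mm so that CR4.PCE tracks whether user-space RDPMC is
 * permitted for that mm; the real caller lives in arch/x86/mm/tlb.c.
 */
#if 0
static void example_switch_cr4(struct mm_struct *next)
{
        /* Enable or disable user RDPMC to match the incoming mm. */
        load_mm_cr4(next);
}
#endif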

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
        /*
         * Xen requires page-aligned LDTs with special permissions.  This is
         * needed to prevent us from installing evil descriptors such as
         * call gates.  On native, we could merge the ldt_struct and LDT
         * allocations, but it's not worth trying to optimize.
         */
        struct desc_struct *entries;
        unsigned int nr_entries;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
#else /* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context_ldt(struct task_struct *tsk,
                                       struct mm_struct *mm)
{
        return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        struct ldt_struct *ldt;

        /* READ_ONCE synchronizes with smp_store_release */
        ldt = READ_ONCE(mm->context.ldt);

        /*
         * Any change to mm->context.ldt is followed by an IPI to all
         * CPUs with the mm active.  The LDT will not be freed until
         * after the IPI is handled by all such CPUs.  This means that,
         * if the ldt_struct changes before we return, the values we see
         * will be safe, and the new values will be loaded before we run
         * any user code.
         *
         * NB: don't try to convert this to use RCU without extreme care.
         * We would still need IRQs off, because we don't want to change
         * the local LDT after an IPI loaded a newer value than the one
         * that we can see.
         */

        if (unlikely(ldt))
                set_ldt(ldt->entries, ldt->nr_entries);
        else
                clear_LDT();
#else
        clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        /*
         * Load the LDT if either the old or new mm had an LDT.
         *
         * An mm will never go from having an LDT to not having an LDT.  Two
         * mms never share an LDT, so we don't gain anything by checking to
         * see whether the LDT changed.  There's also no guarantee that
         * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
         * then prev->context.ldt will also be non-NULL.
         *
         * If we really cared, we could optimize the case where prev == next
         * and we're exiting lazy mode.  Most of the time, if this happens,
         * we don't actually need to reload LDTR, but modify_ldt() is mostly
         * used by legacy code and emulators where we don't need this level of
         * performance.
         *
         * This uses | instead of || because it generates better code.
         */
        if (unlikely((unsigned long)prev->context.ldt |
                     (unsigned long)next->context.ldt))
                load_mm_ldt(next);
#endif

        DEBUG_LOCKS_WARN_ON(preemptible());
}
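/*
 * Worked example of the "|" test above (added for illustration): if
 * neither mm has ever called modify_ldt(), both context.ldt pointers
 * are NULL, the bitwise OR is zero, and LDTR is left untouched.  If
 * either pointer is non-NULL, the OR is non-zero and load_mm_ldt(next)
 * either reloads LDTR from next's table or clears it.
 */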

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mutex_init(&mm->context.lock);

        mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
        atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
                /* pkey 0 is the default and always allocated */
                mm->context.pkey_allocation_map = 0x1;
                /* -1 means unallocated or invalid */
                mm->context.execute_only_pkey = -1;
        }
#endif
        return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
        destroy_context_ldt(mm);
}
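/*
 * Rough sketch (an assumption, not part of this header) of how the
 * ctx_id/tlb_gen pair set up in init_new_context() is meant to be
 * consumed: TLB-flush code bumps mm->context.tlb_gen, and a CPU treats
 * its local TLB as stale for that mm whenever the generation it last
 * synced to lags behind the mm's current one.
 */
#if 0
static bool example_tlb_is_stale(struct mm_struct *mm, u64 local_tlb_gen)
{
        return local_tlb_gen < atomic64_read(&mm->context.tlb_gen);
}
#endif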

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                               struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        paravirt_arch_dup_mmap(oldmm, mm);
        return 0;
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
               !(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
        return false;
}
#endif
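/*
 * Example use (illustrative only; the helper and sizes below are
 * assumptions, not taken from this header): callers such as the MPX
 * code key pointer and bounds-directory entry sizes off of whether the
 * mm is 64-bit.
 */
#if 0
static unsigned long example_bd_entry_size(struct mm_struct *mm)
{
        return is_64bit_mm(mm) ? 8 : 4; /* hypothetical sizes */
}
#endif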

static inline void arch_bprm_mm_init(struct mm_struct *mm,
                                     struct vm_area_struct *vma)
{
        mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        /*
         * mpx_notify_unmap() goes and reads a rarely-hot
         * cacheline in the mm_struct.  That can be expensive
         * enough to be seen in profiles.
         *
         * The mpx_notify_unmap() call and its contents have been
         * observed to affect munmap() performance on hardware
         * where MPX is not present.
         *
         * The unlikely() optimizes for the fast case: no MPX
         * in the CPU, or no MPX use in the process.  Even if
         * we get this wrong (in the unlikely event that MPX
         * is widely enabled on some system) the overhead of
         * MPX itself (reading bounds tables) is expected to
         * overwhelm the overhead of getting this unlikely()
         * consistently wrong.
         */
        if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
                mpx_notify_unmap(mm, vma, start, end);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static inline int vma_pkey(struct vm_area_struct *vma)
{
        unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
                                      VM_PKEY_BIT2 | VM_PKEY_BIT3;

        return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
}
#else
static inline int vma_pkey(struct vm_area_struct *vma)
{
        return 0;
}
#endif
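/*
 * Worked example (added for illustration): a VMA tagged with pkey 5
 * (binary 0101) has VM_PKEY_BIT0 and VM_PKEY_BIT2 set in vm_flags, so
 * masking with the four VM_PKEY_BIT* flags and shifting right by
 * VM_PKEY_SHIFT recovers the value 5.
 */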

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
        if (!current->mm)
                return true;
        /*
         * Should PKRU be enforced on the access to this VMA?  If
         * the VMA is from another process, then PKRU has no
         * relevance and should not be enforced.
         */
        if (current->mm != vma->vm_mm)
                return true;

        return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                bool write, bool execute, bool foreign)
{
        /* pkeys never affect instruction fetches */
        if (execute)
                return true;
        /* allow access if the VMA is not one from this process */
        if (foreign || vma_is_foreign(vma))
                return true;
        return __pkru_allows_pkey(vma_pkey(vma), write);
}
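/*
 * Sketch of the test that __pkru_allows_pkey() is expected to perform
 * (an assumption for illustration; the real helper lives elsewhere in
 * the x86 headers): PKRU carries an access-disable and a write-disable
 * bit per key, so key K permits reads when bit 2*K is clear and writes
 * only when bit 2*K+1 is clear as well.
 */
#if 0
static bool example_pkru_allows(u32 pkru, int pkey, bool write)
{
        if (pkru & (1U << (2 * pkey)))                  /* access-disable */
                return false;
        if (write && (pkru & (1U << (2 * pkey + 1))))   /* write-disable */
                return false;
        return true;
}
#endif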

/*
 * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
 * bits.  This serves two purposes.  It prevents a nasty situation in
 * which PCID-unaware code saves CR3, loads some other value (with PCID
 * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
 * the saved ASID was nonzero.  It also means that any bugs involving
 * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
 * deterministically.
 */

static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
{
        if (static_cpu_has(X86_FEATURE_PCID)) {
                VM_WARN_ON_ONCE(asid > 4094);
                return __sme_pa(mm->pgd) | (asid + 1);
        } else {
                VM_WARN_ON_ONCE(asid != 0);
                return __sme_pa(mm->pgd);
        }
}

static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
{
        VM_WARN_ON_ONCE(asid > 4094);
        return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
}
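/*
 * Worked example (added for illustration): the hardware PCID field is
 * 12 bits, so ASID+1 must stay at or below 4095, which is why ASIDs
 * above 4094 trip the warning.  For ASID 5 the PCID becomes 6, and the
 * non-flushing variant additionally sets CR3_NOFLUSH (bit 63) so the
 * CPU keeps any TLB entries already tagged with that PCID.
 */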

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
        unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
                this_cpu_read(cpu_tlbstate.loaded_mm_asid));

        /* For now, be very restrictive about when this can be called. */
        VM_WARN_ON(in_nmi() || preemptible());

        VM_BUG_ON(cr3 != __read_cr3());
        return cr3;
}
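/*
 * Usage sketch (hypothetical caller, not from this file): code that
 * temporarily borrows CR3, such as KVM, can snapshot the current value
 * cheaply and restore it afterwards without a slow __read_cr3().
 */
#if 0
static void example_borrow_cr3(unsigned long scratch_cr3)
{
        unsigned long saved_cr3 = __get_current_cr3_fast();

        write_cr3(scratch_cr3);         /* run with the borrowed CR3 */
        write_cr3(saved_cr3);           /* put the original CR3 back */
}
#endif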

#endif /* _ASM_X86_MMU_CONTEXT_H */