#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
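
/*
 * With CONFIG_PARAVIRT, paravirt_activate_mm() is provided by
 * <asm/paravirt.h>; the stub below keeps the native build working.
 */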
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

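/*
 * CR4.PCE controls whether user space may execute RDPMC.  It is kept
 * per-mm: set if perf has granted this mm access to the counters (or
 * rdpmc_always_available is enabled), cleared otherwise.
 */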
static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	int size;
};

static inline void load_mm_ldt(struct mm_struct *mm)
{
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();

	DEBUG_LOCKS_WARN_ON(preemptible());
}

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

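/*
 * enter_lazy_tlb() is called when the scheduler switches to a kernel
 * thread that borrows the previous task's mm.  Marking the CPU lazy
 * lets a later TLB-flush IPI drop this CPU out of the mm's cpumask
 * (via leave_mm()) instead of flushing it over and over.
 */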
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

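/*
 * switch_mm() moves this CPU onto the 'next' mm: it updates the
 * per-cpu tlbstate and mm_cpumask, reloads CR3 (flushing non-global
 * TLB entries), and reloads the per-mm CR4 and LDT state.
 */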
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);
		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush IPIs for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never set context.ldt to NULL while the mm still
		 * exists.  That means that next->context.ldt !=
		 * prev->context.ldt, because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here.  Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));
			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery.  We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_mm_ldt(next);
		}
	}
#endif
}

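/*
 * activate_mm() installs a new mm on the current CPU outside of a
 * normal task switch (e.g. on the exec path); it is switch_mm() plus
 * the paravirt notification.
 */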
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

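/*
 * deactivate_mm() clears the user segment registers that could still
 * refer to the outgoing mm's TLS/LDT entries, so stale selectors do
 * not leak into the next program image.
 */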
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

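/*
 * Forward the dup/exit mmap notifications to the paravirt layer;
 * hypervisors such as Xen use these hooks to manage (e.g. pin and
 * unpin) the page tables of the affected mm.
 */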
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

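/*
 * is_64bit_mm() reports whether an mm runs native 64-bit or 32-bit
 * (compat) code; MPX uses this to pick the right bounds directory
 * and bounds table layout for the process.
 */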
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!config_enabled(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

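/*
 * arch_bprm_mm_init() runs while a new binary's mm is being set up;
 * mpx_mm_init() resets the MPX state so the new image starts with no
 * bounds directory allocated.
 */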
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */