#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

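/*
 * CR4.PCE controls whether user space may execute RDPMC.  On context
 * switch, load_mm_cr4() sets PCE if the incoming mm has requested
 * rdpmc access (or rdpmc is enabled system-wide via the static key)
 * and clears it otherwise.
 */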
#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

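/*
 * Called when the scheduler switches to a kernel thread: the CPU keeps
 * the old mm's page tables loaded but marks itself lazy, so a TLB
 * flush IPI can make it drop the mm (leave_mm()) instead of flushing,
 * after which further flush IPIs for that mm are suppressed.
 */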
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

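/*
 * switch_mm() installs the next task's address space: it reloads CR3,
 * keeps mm_cpumask() current so TLB flush IPIs reach the right CPUs,
 * and reloads the per-mm CR4 and LDT state.  The prev == next branch
 * handles a CPU returning from lazy TLB mode.
 */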
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);
		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush IPIs for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never free an LDT while the mm still exists.  That
		 * means that next->context.ldt != prev->context.ldt,
		 * because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here.  Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));
			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery.  We must reload CR3
			 * to make sure we use no freed page tables.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}

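/*
 * activate_mm() installs a new mm at exec time: a paravirt
 * notification followed by an ordinary switch_mm().
 */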
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

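/*
 * deactivate_mm() runs when a task releases its mm (on exit or exec,
 * via mm_release()): zero the user segment registers so no stale
 * selector keeps referencing the old mm's LDT.
 */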
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

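/*
 * Report whether an mm runs 64-bit code.  With IA32 emulation compiled
 * out, every mm on a 64-bit kernel is 64-bit; otherwise compat tasks
 * are flagged by context.ia32_compat == TIF_IA32.
 */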
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return !config_enabled(CONFIG_IA32_EMULATION) ||
	       mm->context.ia32_compat != TIF_IA32;
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif

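/*
 * Called while a new executable's mm is being set up (from the binprm
 * code at exec time); x86 uses it to reset the per-mm MPX state.
 */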
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-touched, usually
	 * cold cacheline in the mm_struct.  That can be expensive
	 * enough to show up in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */