#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
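/*
 * When CONFIG_PARAVIRT is off, paravirt_activate_mm() collapses to an
 * empty stub so that activate_mm() further down needs no #ifdef at its
 * call site.
 */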
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

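/*
 * enter_lazy_tlb() is called when the kernel starts running on behalf of
 * a user mm without actually switching to it (e.g. when a kernel thread
 * is scheduled in).  Marking this CPU's tlbstate lazy tells leave_mm()
 * that TLB flush IPIs for the mm can be skipped until we switch back to
 * it for real.
 */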
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

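/*
 * switch_mm() is called with irqs disabled from the scheduler's
 * context_switch().  There are two cases:
 *
 *  - prev != next: a real address-space switch.  Point CR3 at the new
 *    page tables, update mm_cpumask() so flush IPIs reach us for next
 *    (and stop reaching us for prev), and reload the LDT if it differs.
 *
 *  - prev == next (SMP only): we were running this mm lazily and may
 *    have missed a flush IPI after leave_mm(), in which case CR3 could
 *    reference freed page tables; re-set our cpumask bit and reload CR3
 *    if so.
 */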
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);
		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush IPIs for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never free an LDT while the mm still exists.  That
		 * means that next->context.ldt != prev->context.ldt,
		 * because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here.  Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));
			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * TLB flush IPI delivery.  We must reload CR3 to
			 * make sure we don't keep using freed page tables.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}

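/*
 * activate_mm() installs a brand-new address space on the current task
 * (e.g. at exec time): run the paravirt hook, then do a full switch_mm()
 * with tsk == NULL.
 */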
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

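/*
 * deactivate_mm() runs when a task drops its mm (the exit/exec paths).
 * Clear the user segment registers so stale %fs/%gs selectors from the
 * old address space cannot leak into whatever runs next.  32-bit uses
 * %fs for the kernel percpu segment, so only %gs is cleared there.
 */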
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

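/*
 * arch_dup_mmap() runs at fork time, arch_exit_mmap() at the start of
 * address-space teardown.  On x86 both simply forward to the paravirt
 * layer (e.g. so a hypervisor can pin or unpin the page tables).
 */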
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

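/*
 * arch_bprm_mm_init() is called while the fresh mm for an exec is being
 * set up; it initializes the mm's MPX state (no bounds directory has
 * been allocated yet at this point).
 */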
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-used (and thus
	 * likely cache-cold) line in the mm_struct.  That can be
	 * expensive enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */