blob: 0b0ba91ff1efc5156498c1d268cd202630a97291 [file] [log] [blame]
H. Peter Anvin1965aae2008-10-22 22:26:29 -07001#ifndef _ASM_X86_MMU_CONTEXT_H
2#define _ASM_X86_MMU_CONTEXT_H
Jeremy Fitzhardingec3c2fee2008-06-25 00:19:07 -04003
4#include <asm/desc.h>
Arun Sharma600634972011-07-26 16:09:06 -07005#include <linux/atomic.h>
Dave Hansend17d8f92014-07-31 08:40:59 -07006#include <linux/mm_types.h>
7
8#include <trace/events/tlb.h>
9
Jeremy Fitzhardingec3c2fee2008-06-25 00:19:07 -040010#include <asm/pgalloc.h>
11#include <asm/tlbflush.h>
12#include <asm/paravirt.h>
Dave Hansenfe3d1972014-11-14 07:18:29 -080013#include <asm/mpx.h>
Jeremy Fitzhardingec3c2fee2008-06-25 00:19:07 -040014#ifndef CONFIG_PARAVIRT
15#include <asm-generic/mm_hooks.h>
16
/*
 * No-op fallback used when CONFIG_PARAVIRT is disabled: on bare metal
 * there is no hypervisor to notify about an mm activation.
 */
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
21#endif /* !CONFIG_PARAVIRT */
22
23/*
24 * Used for LDT copy/destruction.
25 */
26int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
27void destroy_context(struct mm_struct *mm);
28
Brian Gerst6826c8f2009-01-21 17:26:06 +090029
/*
 * Mark this CPU's TLB state as lazy: the CPU keeps running on @mm's page
 * tables (e.g. for a kernel thread) but drops from TLBSTATE_OK to
 * TLBSTATE_LAZY, so remote TLB-flush logic can treat it specially until
 * a real address-space switch happens.  On UP this is a no-op.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	/* Only downgrade from OK; don't touch an already-lazy state. */
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
37
/*
 * Switch this CPU's address space from @prev to @next: update the percpu
 * TLB state, join @next's mm_cpumask, point CR3 at the new page tables
 * and reload the LDT if it differs.  The comments below note that irqs
 * are blocked during schedule, which is what protects the cpumask
 * manipulation here.
 *
 * NOTE(review): the statement ordering (set state/active_mm, join the
 * new cpumask, load CR3, then leave the old cpumask) appears deliberate
 * with respect to concurrently-sent TLB flush IPIs — do not reorder.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		/* Leave lazy mode and record the mm we are switching to. */
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		/* Start receiving flush IPIs for the new mm. */
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);
		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load the LDT, if the LDT is different: */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		/* Same mm: just clear lazy state and sanity-check. */
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here. Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));
			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
Jeremy Fitzhardingec3c2fee2008-06-25 00:19:07 -040086
/*
 * Activate @next as the current address space: notify paravirt first,
 * then perform the actual switch (tsk == NULL: no task context needed).
 *
 * Fix: the do { } while (0) wrapper must NOT carry a trailing
 * semicolon, otherwise "if (x) activate_mm(p, n); else ..." expands to
 * two statements and the else no longer parses — defeating the entire
 * point of the do-while(0) idiom.
 */
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)
92
Brian Gerst6826c8f2009-01-21 17:26:06 +090093#ifdef CONFIG_X86_32
94#define deactivate_mm(tsk, mm) \
95do { \
Tejun Heoccbeed32009-02-09 22:17:40 +090096 lazy_load_gs(0); \
Brian Gerst6826c8f2009-01-21 17:26:06 +090097} while (0)
98#else
99#define deactivate_mm(tsk, mm) \
100do { \
101 load_gs_index(0); \
102 loadsegment(fs, 0); \
103} while (0)
104#endif
Jeremy Fitzhardingec3c2fee2008-06-25 00:19:07 -0400105
Dave Hansenfe3d1972014-11-14 07:18:29 -0800106static inline void arch_bprm_mm_init(struct mm_struct *mm,
107 struct vm_area_struct *vma)
108{
109 mpx_mm_init(mm);
110}
111
H. Peter Anvin1965aae2008-10-22 22:26:29 -0700112#endif /* _ASM_X86_MMU_CONTEXT_H */