blob: 4a2d4e0c18d99cf635b02820070330f82dccbe63 [file] [log] [blame]
H. Peter Anvin1965aae2008-10-22 22:26:29 -07001#ifndef _ASM_X86_MMU_CONTEXT_H
2#define _ASM_X86_MMU_CONTEXT_H
Jeremy Fitzhardingec3c2fee2008-06-25 00:19:07 -04003
4#include <asm/desc.h>
5#include <asm/atomic.h>
6#include <asm/pgalloc.h>
7#include <asm/tlbflush.h>
8#include <asm/paravirt.h>
9#ifndef CONFIG_PARAVIRT
10#include <asm-generic/mm_hooks.h>
11
/*
 * No-op stub used when CONFIG_PARAVIRT is disabled: activate_mm()
 * below calls paravirt_activate_mm() unconditionally, so a native
 * kernel needs this empty placeholder.
 */
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
16#endif /* !CONFIG_PARAVIRT */
17
18/*
19 * Used for LDT copy/destruction.
20 */
21int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
22void destroy_context(struct mm_struct *mm);
23
Brian Gerst6826c8f2009-01-21 17:26:06 +090024
/*
 * Enter lazy TLB mode for @mm on this CPU: flip this CPU's
 * cpu_tlbstate.state from TLBSTATE_OK to TLBSTATE_LAZY.  While lazy,
 * remote TLB-flush logic can avoid repeatedly IPI-flushing this CPU
 * (see the leave_mm handling in switch_mm below).  On UP builds there
 * are no flush IPIs, so this is a no-op.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	/* Only the OK -> LAZY transition is made; other states are kept. */
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
32
/*
 * Switch this CPU's address space from @prev to @next: update the
 * per-mm CPU masks that steer TLB-flush IPIs, reload CR3 (page
 * tables), and reload the LDT when the two mms' LDTs differ.
 *
 * NOTE(review): the ordering here is deliberate — @prev is cleared
 * from its cpumask before @next is set and CR3 is loaded; do not
 * reorder these statements.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
#ifdef CONFIG_SMP
		/* This CPU now actively runs @next and wants flush IPIs. */
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		percpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		/*
		 * prev == next: we may have been running lazily on this mm.
		 * Leave lazy mode; the active_mm recorded per-CPU must still
		 * be @next or the bookkeeping is broken.
		 */
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
Jeremy Fitzhardingec3c2fee2008-06-25 00:19:07 -040072
/*
 * Install @next as the current address space on this CPU, giving the
 * paravirt layer a chance to hook the transition first
 * (paravirt_activate_mm() is an empty stub without CONFIG_PARAVIRT).
 *
 * Fix: dropped the stray semicolon after "while (0)".  The caller
 * supplies the terminating semicolon, and the extra one made the
 * macro expand to two statements, breaking constructs such as
 * "if (cond) activate_mm(p, n); else ...".
 */
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)
78
#ifdef CONFIG_X86_32
/*
 * Drop user segment state when deactivating an mm.  32-bit clears
 * only %gs (via the lazy path, so no reload if it is already 0).
 */
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
/*
 * 64-bit clears %gs (through the GS-index path) and %fs — the two
 * selectors that may still carry user values at this point.
 */
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
Jeremy Fitzhardingec3c2fee2008-06-25 00:19:07 -040091
H. Peter Anvin1965aae2008-10-22 22:26:29 -070092#endif /* _ASM_X86_MMU_CONTEXT_H */