blob: 69021528b43c240c2c67c4c4ab86b579b40c4ae5 [file] [log] [blame]
H. Peter Anvin1965aae2008-10-22 22:26:29 -07001#ifndef _ASM_X86_MMU_CONTEXT_H
2#define _ASM_X86_MMU_CONTEXT_H
Jeremy Fitzhardingec3c2fee2008-06-25 00:19:07 -04003
4#include <asm/desc.h>
Arun Sharma600634972011-07-26 16:09:06 -07005#include <linux/atomic.h>
Jeremy Fitzhardingec3c2fee2008-06-25 00:19:07 -04006#include <asm/pgalloc.h>
7#include <asm/tlbflush.h>
8#include <asm/paravirt.h>
9#ifndef CONFIG_PARAVIRT
10#include <asm-generic/mm_hooks.h>
11
12static inline void paravirt_activate_mm(struct mm_struct *prev,
13 struct mm_struct *next)
14{
15}
16#endif /* !CONFIG_PARAVIRT */
17
/*
 * Used for LDT copy/destruction: implemented out of line; called from
 * the generic fork/exit paths to set up and tear down a task's LDT.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
Brian Gerst6826c8f2009-01-21 17:26:06 +090024
/*
 * Mark this CPU as being in lazy TLB mode for @mm: the kernel keeps
 * running on the old mm's page tables, and TLBSTATE_LAZY lets the
 * flush-IPI path (leave_mm) skip/short-circuit flushes for this CPU.
 * On UP there are no cross-CPU flush IPIs to avoid, so this is a no-op.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
32
33static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
34 struct task_struct *tsk)
35{
36 unsigned cpu = smp_processor_id();
37
38 if (likely(prev != next)) {
Brian Gerst6826c8f2009-01-21 17:26:06 +090039#ifdef CONFIG_SMP
40 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
41 percpu_write(cpu_tlbstate.active_mm, next);
42#endif
Rusty Russell78f1c4d2009-09-24 09:34:51 -060043 cpumask_set_cpu(cpu, mm_cpumask(next));
Brian Gerst6826c8f2009-01-21 17:26:06 +090044
45 /* Re-load page tables */
46 load_cr3(next->pgd);
47
Suresh Siddha831d52b2011-02-03 12:20:04 -080048 /* stop flush ipis for the previous mm */
49 cpumask_clear_cpu(cpu, mm_cpumask(prev));
50
Brian Gerst6826c8f2009-01-21 17:26:06 +090051 /*
52 * load the LDT, if the LDT is different:
53 */
54 if (unlikely(prev->context.ldt != next->context.ldt))
55 load_LDT_nolock(&next->context);
56 }
57#ifdef CONFIG_SMP
58 else {
59 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
60 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
61
Rusty Russell78f1c4d2009-09-24 09:34:51 -060062 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
Brian Gerst6826c8f2009-01-21 17:26:06 +090063 /* We were in lazy tlb mode and leave_mm disabled
64 * tlb flush IPI delivery. We must reload CR3
65 * to make sure to use no freed page tables.
66 */
67 load_cr3(next->pgd);
68 load_LDT_nolock(&next->context);
69 }
70 }
71#endif
72}
Jeremy Fitzhardingec3c2fee2008-06-25 00:19:07 -040073
/*
 * Activate @next as the current address space: give paravirt a chance
 * to hook the transition, then perform the real switch (no task, so
 * tsk is NULL).
 *
 * Fix: the do { } while (0) idiom must NOT end in a semicolon — the
 * stray ';' made "if (c) activate_mm(a, b); else ..." a syntax error.
 */
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)
79
/*
 * Drop user segment state when an mm is deactivated so no stale
 * selectors survive into the next context:
 *  - 32-bit: clear the lazily-managed %gs.
 *  - 64-bit: clear the %gs index via load_gs_index() and zero %fs.
 */
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
Jeremy Fitzhardingec3c2fee2008-06-25 00:19:07 -040092
H. Peter Anvin1965aae2008-10-22 22:26:29 -070093#endif /* _ASM_X86_MMU_CONTEXT_H */