blob: 7e2aa23fccbf5f762f51be38e051273fde05897d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef __X86_64_MMU_CONTEXT_H
2#define __X86_64_MMU_CONTEXT_H
3
Linus Torvalds1da177e2005-04-16 15:20:36 -07004#include <asm/desc.h>
5#include <asm/atomic.h>
6#include <asm/pgalloc.h>
7#include <asm/pda.h>
8#include <asm/pgtable.h>
9#include <asm/tlbflush.h>
Jeremy Fitzhardinged6dd61c2007-05-02 19:27:14 +020010#include <asm-generic/mm_hooks.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070011
12/*
13 * possibly do the LDT unload here?
14 */
15int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
16void destroy_context(struct mm_struct *mm);
17
/*
 * enter_lazy_tlb - mark this CPU's TLB state as lazy for @mm.
 *
 * On SMP, flip the per-CPU PDA mmu_state from TLBSTATE_OK to
 * TLBSTATE_LAZY.  While lazy, remote CPUs may stop sending us TLB
 * flush IPIs (see the lazy-mode recovery path in switch_mm(), which
 * reloads CR3 when IPI delivery was disabled).  On UP builds this is
 * a no-op.  @mm and @tsk are unused here.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (read_pda(mmu_state) == TLBSTATE_OK)
		write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}
Linus Torvalds1da177e2005-04-16 15:20:36 -070025
/*
 * switch_mm - switch this CPU from address space @prev to @next.
 *
 * Real switch (@prev != @next): first leave @prev's cpu_vm_mask so
 * TLB-flush IPIs for @prev stop targeting this CPU, publish the new
 * state in the per-CPU PDA, join @next's cpu_vm_mask, load the new
 * page tables through CR3, and reload the LDT only if it differs.
 *
 * Same mm on SMP (@prev == @next): we may be coming back from lazy TLB
 * mode, in which leave_mm() cleared our cpu_vm_mask bit and disabled
 * flush IPI delivery; in that case CR3 and the LDT must be reloaded so
 * no freed page tables or stale LDT are used.
 *
 * @tsk is unused (activate_mm() passes NULL).
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();
	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		/* Publish new state before joining next's cpu_vm_mask. */
		write_pda(mmu_state, TLBSTATE_OK);
		write_pda(active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);
		/* Install the new page tables. */
		load_cr3(next->pgd);

		/* Reloading the LDT is comparatively expensive -- skip
		 * it when both mms share the same LDT. */
		if (unlikely(next->context.ldt != prev->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		write_pda(mmu_state, TLBSTATE_OK);
		/* Lazy-TLB invariant: the PDA must still track @next. */
		if (read_pda(active_mm) != next)
			BUG();
		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
59
/*
 * deactivate_mm - drop user segment-register state for @mm.
 *
 * Zeroes the user %gs selector (through load_gs_index()) and the %fs
 * selector (direct inline asm).  NOTE(review): presumably this keeps
 * stale selectors from referencing the outgoing mm's LDT once it is
 * torn down -- confirm against the callers of deactivate_mm().
 * @tsk and @mm are not evaluated by this macro.
 */
#define deactivate_mm(tsk,mm) do { \
	load_gs_index(0); \
	asm volatile("movl %0,%%fs"::"r"(0)); \
} while(0)
64
/*
 * activate_mm - install @next as this CPU's address space.
 *
 * Thin wrapper around switch_mm(); passes NULL for the task pointer,
 * which is safe because switch_mm() never dereferences its tsk
 * argument.
 */
#define activate_mm(prev, next) \
	switch_mm((prev),(next),NULL)
67
68
69#endif