#ifndef _ASM_X86_MMU_CONTEXT_64_H
#define _ASM_X86_MMU_CONTEXT_64_H

#include <asm/pda.h>

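/*
 * Called when the kernel will run with an mm it does not own (e.g. a
 * kernel thread): on SMP, mark this CPU's TLB state lazy so leave_mm()
 * can drop us from the mm instead of sending us further flush IPIs.
 */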
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (read_pda(mmu_state) == TLBSTATE_OK)
		write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}

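/*
 * Switch this CPU to a new address space: update the per-CPU TLB state,
 * move the CPU between the old and new mm's cpu_vm_mask so flush IPIs
 * are routed correctly, load the new page tables through CR3, and
 * reload the LDT only when it actually differs.
 */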
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		write_pda(mmu_state, TLBSTATE_OK);
		write_pda(active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);
		load_cr3(next->pgd);

		if (unlikely(next->context.ldt != prev->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
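		/*
		 * prev == next: this CPU may have gone lazy and been
		 * removed from cpu_vm_mask by leave_mm(); re-arm the
		 * TLB state and rejoin the mm below if so.
		 */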
		write_pda(mmu_state, TLBSTATE_OK);
		if (read_pda(active_mm) != next)
			BUG();
		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}

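/*
 * Called on exec to detach the outgoing mm: clear the user fs and gs
 * selectors so no stale segment state leaks into the new program. gs
 * must be cleared via load_gs_index() so the kernel's swapgs-managed
 * GS base is handled correctly.
 */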
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	asm volatile("movl %0,%%fs"::"r"(0));	\
} while (0)

#endif /* _ASM_X86_MMU_CONTEXT_64_H */