#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/special_insns.h>

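/*
 * With CONFIG_PARAVIRT the __flush_tlb*() operations are supplied by
 * <asm/paravirt.h>; native kernels map them straight onto the
 * __native_*() helpers defined below.
 */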
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

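/*
 * Reloading CR3 with its current value invalidates all TLB entries
 * except those for global (_PAGE_GLOBAL) pages.
 */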
static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = native_read_cr4();
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

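/*
 * invlpg invalidates the TLB entry for a single page; unlike a CR3
 * reload it also drops a matching global-page entry.
 */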
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

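/*
 * A full flush must drop global entries as well; when the CPU supports
 * PGE that requires the CR4.PGE toggle done by __flush_tlb_global().
 */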
static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	__flush_tlb_single(addr);
}

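/* Sentinel "end" address: flush the whole address space, not a range. */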
#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */

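/*
 * Illustrative usage (a hedged sketch, not code from this header): after
 * changing a user PTE, the stale translation for that address must be
 * invalidated.  The function below is hypothetical; in-tree callers use
 * flush_tlb_page()/flush_tlb_range() in the same pattern.
 *
 *	static void example_update_page(struct vm_area_struct *vma,
 *					unsigned long addr, pte_t *ptep,
 *					pte_t newpte)
 *	{
 *		set_pte_at(vma->vm_mm, addr, ptep, newpte);
 *		flush_tlb_page(vma, addr);
 *	}
 */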
#ifndef CONFIG_SMP

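/*
 * On UP there are no other CPUs to notify, so every flush primitive
 * reduces to a local flush (or to nothing when the mm is not current).
 */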
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_mm_range(struct mm_struct *mm,
	   unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#else /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define flush_tlb()	flush_tlb_current_task()

void native_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm,
				unsigned long start, unsigned long end);

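/*
 * Per-cpu TLB state: TLBSTATE_OK means this CPU is actively using
 * cpu_tlbstate.active_mm; TLBSTATE_LAZY means it is in lazy TLB mode
 * and may skip further flush IPIs for that mm.
 */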
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

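/*
 * Drop this CPU back to a clean, non-lazy state pointing at init_mm;
 * used e.g. in CPU hotplug paths.
 */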
static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#endif /* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */