#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3
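
/*
 * The wrappers below map one-to-one onto the descriptor types above;
 * the two "all" variants take no meaningful pcid/addr and simply pass
 * zeroes to __invpcid().
 */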

/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
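
/*
 * Note that these wrappers are unconditional: the caller must check for
 * INVPCID support first, e.g. (illustrative only)
 *
 *	if (static_cpu_has(X86_FEATURE_INVPCID))
 *		invpcid_flush_all();
 *
 * as __native_flush_tlb_global() does below.
 */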

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
	struct mm_struct *active_mm;
	int state;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize the CR4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set the given bits in this CPU's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear the given bits in this CPU's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

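/*
 * Toggle (XOR) the given bits in this CPU's CR4 and its shadow. Unlike
 * cr4_set_bits()/cr4_clear_bits() there is no redundancy check: the
 * shadow and the hardware register are always written.
 */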
static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change
	 * during a task switch and therefore we must not be preempted
	 * while we write CR3 back:
	 */
	preempt_disable();
	native_write_cr3(native_read_cr3());
	preempt_enable();
}

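/*
 * Toggling CR4.PGE flushes the entire TLB, including global entries.
 * Interrupts must already be off here; __native_flush_tlb_global()
 * below handles the IRQ save/restore for the general case.
 */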
static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

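/*
 * invlpg invalidates the TLB entry for a single page (including a
 * global entry for that address, if any) on the local CPU only;
 * cross-CPU shootdowns go through flush_tlb_others() below.
 */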
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
};
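
/*
 * Illustrative only -- a sketch of how a sender might fill this in
 * (the real callers live in the TLB-flush implementation, not here):
 *
 *	struct flush_tlb_info info = {
 *		.mm    = mm,
 *		.start = 0UL,
 *		.end   = TLB_FLUSH_ALL,
 *	};
 *	flush_tlb_others(mm_cpumask(mm), &info);
 *
 * A (0, TLB_FLUSH_ALL) range requests a full flush, matching the
 * flush_tlb_mm() wrapper below.
 */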

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

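/*
 * Batched unmap TLB flush: arch_tlbbatch_add_mm() records the CPUs that
 * may hold stale translations for @mm, so that a later
 * arch_tlbbatch_flush() can flush them all in one go.
 */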
static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */