#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

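/*
 * Per the Intel SDM, the INVPCID memory operand is a 128-bit
 * descriptor: bits 0-11 of the low quadword hold the PCID and the
 * high quadword holds the linear address (used only by the
 * individual-address type).  The anonymous struct in __invpcid()
 * above lays this out as two u64s.
 */
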
/* Flush all mappings for a given PCID and address, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}

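/*
 * Illustrative only: callers are expected to gate these helpers on the
 * INVPCID feature bit, as __native_flush_tlb_global() below does:
 *
 *	if (static_cpu_has(X86_FEATURE_INVPCID))
 *		invpcid_flush_all();
 */
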
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
#ifdef CONFIG_SMP
	struct mm_struct *active_mm;
	int state;
#endif

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set the given bits in this CPU's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear the given bits in this CPU's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

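/*
 * Illustrative only: typical callers pass a single feature bit during
 * CPU setup, e.g. cr4_set_bits(X86_CR4_SMEP) when SMEP is supported,
 * or cr4_clear_bits(X86_CR4_PCE) to forbid user-space RDPMC.
 */
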
static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

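/*
 * Unlike cr4_set_bits()/cr4_clear_bits(), the toggle variant writes
 * CR4 unconditionally; it exists for the context-switch path (e.g.
 * flipping X86_CR4_TSD when TIF_NOTSC differs between the outgoing
 * and incoming task), where the caller already knows the bits change.
 */
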
/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

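/*
 * Illustrative only: early boot code uses this for CR4 bits that
 * secondary CPUs must replicate, e.g.
 * cr4_set_bits_and_update_boot(X86_CR4_PSE) from the page-size
 * probing code.
 */
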
static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
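	/*
	 * Writing CR3 back to itself invalidates all non-global TLB
	 * entries; global (CR4.PGE) entries survive and need
	 * __flush_tlb_global() instead.
	 */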
	native_write_cr3(native_read_cr3());
	preempt_enable();
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
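	/*
	 * Architecturally, toggling CR4.PGE flushes the entire TLB,
	 * including global entries, which a plain CR3 write would
	 * leave intact.
	 */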
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
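	/*
	 * With global pages enabled, a plain CR3 write leaves global
	 * entries behind, so a full flush must take the CR4.PGE (or
	 * INVPCID) path.
	 */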
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */

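/*
 * Illustrative only (not part of this header): a typical caller
 * updates page-table entries first and flushes afterwards, e.g.:
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, newpte);
 *	flush_tlb_range(vma, addr, addr + PAGE_SIZE);
 *
 * Exact call sites vary across the generic mm code.
 */
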
#ifndef CONFIG_SMP

/*
 * "_up" is for UniProcessor.
 *
 * This is a helper for other header functions.  *Not* intended to be called
 * directly.  All global TLB flushes need to either call this, or to bump the
 * vm statistics themselves.
 */
static inline void __flush_tlb_up(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
}

static inline void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
}

static inline void local_flush_tlb(void)
{
	__flush_tlb_up();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_up();
}

static inline void flush_tlb_mm_range(struct mm_struct *mm,
	   unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}

static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}

static inline void reset_lazy_tlbstate(void)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}

#else /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
				struct mm_struct *mm,
				unsigned long start, unsigned long end);

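/*
 * cpu_tlbstate.state tracks lazy TLB mode: TLBSTATE_OK means this CPU
 * is actively using its active_mm, while TLBSTATE_LAZY means a kernel
 * thread is merely borrowing it, so the CPU can drop out of the mm's
 * cpumask instead of servicing further flush IPIs.
 */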
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

#endif /* SMP */

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end)	\
	native_flush_tlb_others(mask, mm, start, end)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */