#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

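/*
 * Issue one invocation of the INVPCID instruction.  The descriptor holds
 * the PCID in d[0] and the linear address in d[1]; 'type' selects one of
 * the four invalidation modes defined below.
 */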
static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush all mappings for a given pcid and addr, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
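/*
 * Illustrative example, not part of the original header: a caller that
 * wants to drop the TLB entry for 'addr' under PCID 1 without touching
 * global mappings could do:
 *
 *	if (static_cpu_has(X86_FEATURE_INVPCID))
 *		invpcid_flush_one(1, addr);
 */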

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

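/*
 * Per-cpu TLB state: the mm that is currently loaded, the lazy-TLB state
 * (TLBSTATE_OK/TLBSTATE_LAZY) and a software shadow of CR4.
 */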
struct tlb_state {
	struct mm_struct *active_mm;
	int state;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set bits in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear bits in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

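/* Toggle the given bits in this cpu's CR4, keeping the shadow in sync. */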
static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

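/*
 * Set bits in CR4 and also record them in mmu_cr4_features (and the
 * trampoline copy) so that later-booting CPUs pick up the same flags.
 */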
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change during a
	 * task switch and therefore we must not be preempted while we write CR3
	 * back:
	 */
	preempt_disable();
	native_write_cr3(native_read_cr3());
	preempt_enable();
}

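/*
 * Flush everything, including global mappings, by toggling CR4.PGE.
 * The caller must have interrupts disabled.
 */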
static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

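/* Flush the TLB entry for a single address on the local CPU via INVLPG. */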
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

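/*
 * Flush everything on the local CPU: a global flush when the CPU supports
 * global pages (PGE), otherwise a non-global flush.
 */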
static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();
}

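/* Flush one address on the local CPU and account it in the vmstat counters. */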
static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
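/*
 * flush_tlb_others() callers describe the flush with a flush_tlb_info:
 * the target mm and the virtual address range (start/end) to flush.
 */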
struct flush_tlb_info {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
};

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

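/* Values for cpu_tlbstate.state: */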
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

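/* Reset this CPU's lazy TLB state and point active_mm back at init_mm. */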
static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

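/*
 * Record that every CPU which has used this mm may need a TLB flush when
 * the deferred batch is processed by arch_tlbbatch_flush().
 */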
static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */