#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush a single address for a given PCID, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	u64 new_tlb_gen;

	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	smp_mb__before_atomic();
	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
	smp_mb__after_atomic();

	return new_tlb_gen;
}
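
/*
 * Editor's illustrative sketch (not part of the kernel API): a caller that
 * has just modified the paging structures is expected to do roughly
 *
 *	set_pte_at(mm, addr, ptep, pte);	// write the page tables
 *	new_tlb_gen = inc_mm_tlb_gen(mm);	// bump gen; full barrier
 *	// only after the barrier is it safe to read mm_cpumask(mm)
 *	// and decide which CPUs need a flush IPI
 *
 * so that a CPU doing switch_mm() concurrently either sees the new PTEs
 * or is targeted by the flush.
 */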

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

/*
 * If tlb_defer_switch_to_init_mm() returns true, then we try to avoid
 * switching CR3 to point to init_mm when we switch to a kernel thread
 * (e.g. the idle thread).  If it returns false, then we immediately
 * switch CR3 when entering a kernel thread.
 */
DECLARE_STATIC_KEY_TRUE(__tlb_defer_switch_to_init_mm);

static inline bool tlb_defer_switch_to_init_mm(void)
{
	return static_branch_unlikely(&__tlb_defer_switch_to_init_mm);
}
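
/*
 * Illustrative sketch (editor's summary of how arch/x86/mm/tlb.c is
 * expected to consult this key, not a definitive copy of that code):
 * when a kernel thread is scheduled, enter_lazy_tlb() does roughly
 *
 *	if (tlb_defer_switch_to_init_mm())
 *		this_cpu_write(cpu_tlbstate.is_lazy, true);	// keep old CR3
 *	else
 *		switch_mm(NULL, &init_mm, NULL);		// switch now
 */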

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in
 * two cache lines.
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false.
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs.
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that were already stale at the point when
	 * that mm reached the tlb_gen recorded in the list.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
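
/*
 * Editor's note: cpu_tlbstate is per-cpu state and is normally accessed
 * with the this_cpu accessors, e.g. (illustrative)
 *
 *	struct mm_struct *mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 *	u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 *
 * as the cr4 helpers below do for the cr4 shadow.
 */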

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Toggle the given bits in this cpu's CR4 and its shadow. */
static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
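
/*
 * Illustrative sketch (editor's example, not a definitive call site): a
 * caller that needs to modify a CR4 feature bit while keeping the shadow
 * coherent might do
 *
 *	local_irq_save(flags);
 *	cr4_set_bits(X86_CR4_PCE);	// updates shadow and hardware CR4
 *	local_irq_restore(flags);
 *
 * and later use cr4_read_shadow() instead of a slow hardware CR4 read.
 */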

/*
 * Save some of the cr4 feature set we're using (e.g.  Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change
	 * during a task switch and therefore we must not be preempted
	 * while we write CR3 back:
	 */
	preempt_disable();
	native_write_cr3(__native_read_cr3());
	preempt_enable();
}

static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts.  (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();

	/*
	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
	 * we'd end up flushing kernel translations for the current ASID but
	 * we might fail to flush kernel translations for other cached ASIDs.
	 *
	 * To avoid this issue, we force PCID off if PGE is off.
	 */
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	u64 new_tlb_gen;
};
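
/*
 * Editor's sketch of how a partial-flush request is built (it mirrors what
 * flush_tlb_mm_range() does internally; the exact field values here are
 * illustrative):
 *
 *	struct flush_tlb_info info = {
 *		.mm		= mm,
 *		.start		= start,
 *		.end		= end,
 *		.new_tlb_gen	= inc_mm_tlb_gen(mm),
 *	};
 *	flush_tlb_others(mm_cpumask(mm), &info);
 */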

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
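
/*
 * Editor's illustrative sketch of the batched-unmap flow (generic reclaim
 * code drives this; the loop below is a summary, not real code):
 *
 *	for each page being unmapped:
 *		clear the PTE(s);
 *		arch_tlbbatch_add_mm(&batch, mm);	// record mm's CPUs
 *	...
 *	arch_tlbbatch_flush(&batch);	// one IPI round for everything
 */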

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */