#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

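/*
 * cpu_tlbstate holds this CPU's lazy-TLB bookkeeping: the mm whose page
 * tables we are currently running on (active_mm) and whether this CPU is
 * current for that mm (TLBSTATE_OK) or merely borrowing its page tables
 * while running a kernel thread (TLBSTATE_LAZY).
 */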
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};
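
/*
 * Convention for the range fields, as used by the functions below:
 * flush_end == TLB_FLUSH_ALL requests a full TLB flush, and a zero
 * flush_end means "one page at flush_start" (flush_tlb_func() fixes the
 * range up to flush_start + PAGE_SIZE).
 */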

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */
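
/*
 * For illustration, path 1b above corresponds roughly to this fragment
 * of switch_mm() (paraphrased from arch/x86/include/asm/mmu_context.h
 * of this era, not verbatim):
 *
 *	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);	// 1b1
 *	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) { // 1b2
 *		// Bit was 0: we were lazy and leave_mm() disabled IPI
 *		// delivery, so a flush may have been missed.  Reload
 *		// cr3 to avoid using freed page tables.	// 1b3
 *		load_cr3(next->pgd);
 *	}
 */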

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;
	if (!f->flush_end)
		f->flush_end = f->flush_start + PAGE_SIZE;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}
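
/*
 * Note the lazy case above: rather than servicing the flush, a lazy CPU
 * calls leave_mm(), which clears it from mm_cpumask() so that it stops
 * receiving flush IPIs for this mm altogether.
 */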

void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
								&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
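
/*
 * The callers below go through the flush_tlb_others() hook rather than
 * calling native_flush_tlb_others() directly; on paravirt kernels that
 * hook may be overridden by the hypervisor, otherwise it resolves to
 * the native implementation above.
 */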

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	local_flush_tlb();
	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
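
/*
 * The ceiling is tunable at run time through the debugfs file
 * ("tlb_single_page_flush_ceiling" under arch_debugfs_dir) registered
 * at the bottom of this file.
 */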

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm)
		goto out;

	if (!current->mm) {
		leave_mm(smp_processor_id());
		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}
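
/*
 * Most generic code reaches flush_tlb_mm_range() through the
 * flush_tlb_range() wrapper in asm/tlbflush.h, which passes vma->vm_mm
 * as mm and vma->vm_flags as vmflag (so the VM_HUGETLB check above can
 * see the mapping type).
 */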

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}
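
/*
 * Unlike the mm-scoped flushes above, __flush_tlb_all() also drops
 * global (kernel) TLB entries: with PGE enabled it toggles CR4.PGE,
 * otherwise a plain cr3 reload suffices.
 */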

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* balance against a user space task's flush; a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;
		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

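/*
 * debugfs knob for tlb_single_page_flush_ceiling.  Assuming debugfs is
 * mounted in the usual place, the file appears as
 * /sys/kernel/debug/x86/tlb_single_page_flush_ceiling and can be read
 * and written with plain cat/echo.
 */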
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);