#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

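/*
 * Descriptor passed to the remote flush handlers. By convention in
 * this file, flush_end == TLB_FLUSH_ALL requests a full TLB flush,
 * and a zero flush_end is normalized by flush_tlb_func() to a single
 * page starting at flush_start.
 */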
struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;
	if (!f->flush_end)
		f->flush_end = f->flush_start + PAGE_SIZE;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}

void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;

	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
								&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

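/*
 * Flush the whole TLB for the current task's mm on this cpu, and ask
 * every other cpu running this mm to do the same.
 */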
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	local_flush_tlb();
	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details. We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead. Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * about 3,300 ns (33 flushes at ~100 ns each).
 *
 * This is in units of pages.
 */
unsigned long tlb_single_page_flush_ceiling = 33;

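/*
 * Worked example for the ceiling (hypothetical sizes, 4 KB pages):
 * flushing a 64 KB range is 16 single-page flushes, which stays under
 * the ceiling of 33, so it is done with individual invlpg operations;
 * a 2 MB range is 512 pages, so flush_tlb_mm_range() falls back to a
 * full TLB flush instead.
 */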
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm)
		goto out;

	if (!current->mm) {
		leave_mm(smp_processor_id());
		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

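/*
 * Helper run on each cpu via on_each_cpu() by flush_tlb_all() and
 * flush_tlb_kernel_range(): flush the entire TLB and, if this cpu
 * was in lazy TLB mode, leave the mm.
 */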
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance against a user space task's flush; a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
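
/*
 * Example usage of the knob above (assuming debugfs is mounted at
 * /sys/kernel/debug, where arch_debugfs_dir is the "x86" directory):
 *
 *	# cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	33
 *	# echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * Larger values make more range flushes use per-page invlpg; 0 makes
 * flush_tlb_mm_range() always fall back to a full flush.
 */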