#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

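/*
 * Argument block handed to the remote flush handler (flush_tlb_func):
 * the mm being flushed and the [flush_start, flush_end) virtual range.
 * flush_end == TLB_FLUSH_ALL requests a full flush; flush_end == 0
 * denotes a single page at flush_start.
 */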
struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;
	if (!f->flush_end)
		f->flush_end = f->flush_start + PAGE_SIZE;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;

			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else {
		leave_mm(smp_processor_id());
	}
}

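/*
 * Ask the CPUs in @cpumask to flush @mm's [start, end) range, using the
 * generic CALL_FUNCTION_VECTOR IPI via smp_call_function_many().  On UV
 * systems the mask is first filtered through the UV shootdown hardware,
 * which may handle some or all CPUs itself.  Callers are expected to
 * keep the cpumask stable, e.g. by disabling preemption.
 */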
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	struct flush_tlb_info info;

	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	/* The tracepoint reports pages; end == 0 means one page at start. */
	if (end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else if (!end)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, 1UL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(end - start) >> PAGE_SHIFT);
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
					       &info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

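/*
 * Flush all of the current task's mm from this CPU's TLB, then send a
 * full-flush request (0, TLB_FLUSH_ALL) to every other CPU in
 * mm_cpumask() that is running the same mm.
 */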
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	/* This is an implicit full barrier that synchronizes with switch_mm. */
	local_flush_tlb();

	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,300 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

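/*
 * Flush the [start, end) range of @mm on every CPU that uses it.  With
 * the default ceiling of 33, e.g. a 32-page flush is done with
 * individual 'invlpg' instructions, while a 34-page flush falls back to
 * a full TLB flush.  Huge-page ranges (VM_HUGETLB in @vmflag) and
 * end == TLB_FLUSH_ALL always take the full-flush path.
 */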
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm) {
		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if (!current->mm) {
		leave_mm(smp_processor_id());

		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	/*
	 * Both branches below are implicit full barriers (MOV to CR or
	 * INVLPG) that synchronize with switch_mm.
	 */
	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}

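/*
 * Flush one user page: 'start' is its virtual address, @vma supplies
 * the mm.  Remote CPUs are passed end == 0, which flush_tlb_func
 * expands to a single page at flush_start.
 */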
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * Implicit full barrier (INVLPG) that synchronizes
			 * with switch_mm.
			 */
			__flush_tlb_one(start);
		} else {
			leave_mm(smp_processor_id());

			/* Synchronize with switch_mm. */
			smp_mb();
		}
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

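/*
 * IPI handler for a full flush: flush everything on this CPU, and leave
 * the lazily-borrowed mm (leave_mm) if we were in lazy TLB mode.
 */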
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

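/*
 * Flush everything, everywhere: run do_flush_tlb_all() on each online
 * CPU, including this one.
 */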
void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

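/*
 * IPI handler for flush_tlb_kernel_range(): only the range in *info is
 * used; flush_mm is left unset, since kernel mappings are shared by
 * all mms.
 */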
static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

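/*
 * Flush a kernel virtual address range on every CPU.  The user-space
 * ceiling is reused here: beyond tlb_single_page_flush_ceiling pages,
 * the per-page 'invlpg' loop costs more than a full flush.
 */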
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance as for a user-space task's flush, a bit conservatively */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

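/*
 * The ceiling above is tunable at runtime through debugfs
 * (arch_debugfs_dir is the "x86" directory there).  Assuming debugfs is
 * mounted at /sys/kernel/debug, a root shell can, for example, do:
 *
 *	cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * 0 makes every ranged flush a full flush; a very large value makes
 * ranged flushes always use per-page 'invlpg'.
 */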
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	/* the ceiling is an unsigned long, so print it with %lu */
	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);