#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};
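
/*
 * Encoding used by the flush paths below, as implied by flush_tlb_func():
 * flush_end == TLB_FLUSH_ALL requests a full flush, flush_end == 0 a
 * single-page flush of flush_start, and anything else a page-by-page
 * flush of [flush_start, flush_end).
 */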

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);
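
/*
 * Note: leave_mm() is what ends lazy TLB mode. The CPU drops itself from
 * mm_cpumask(), so the other CPUs stop sending it flush IPIs for this mm,
 * and reloads cr3 with swapper_pg_dir so that stale user translations
 * cannot be re-fetched speculatively.
 */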

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */
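
/*
 * A minimal sketch of the 1b path above, assuming the simplified shape of
 * switch_mm() (the real code lives in asm/mmu_context.h):
 *
 *	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);	// 1b1
 *	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(mm)))	// 1b2
 *		load_cr3(mm->pgd);	// 1b3: leave_mm() ran, reload cr3
 */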

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL || !cpu_has_invlpg)
			local_flush_tlb();
		else if (!f->flush_end)
			__flush_tlb_single(f->flush_start);
		else {
			unsigned long addr;

			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
		}
	} else
		leave_mm(smp_processor_id());
}

void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;

	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
						&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
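
/*
 * The callers below go through flush_tlb_others(), the paravirt hook that
 * resolves to native_flush_tlb_others() on bare metal. A typical call,
 * with hypothetical values, asking every other CPU that may have this mm
 * loaded to flush one page:
 *
 *	flush_tlb_others(mm_cpumask(mm), mm, addr, addr + PAGE_SIZE);
 */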

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * Find a large page in the flush range: either a THP huge page, or a
 * HUGETLB page when THP is disabled.
 */
static inline unsigned long has_large_page(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = ALIGN(start, HPAGE_SIZE);

	for (; addr < end; addr += HPAGE_SIZE) {
		pgd = pgd_offset(mm, addr);
		if (likely(!pgd_none(*pgd))) {
			pud = pud_offset(pgd, addr);
			if (likely(!pud_none(*pud))) {
				pmd = pmd_offset(pud, addr);
				if (likely(!pmd_none(*pmd)))
					if (pmd_large(*pmd))
						return addr;
			}
		}
	}
	return 0;
}
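
/*
 * E.g. (hypothetical layout, assuming 2MB huge pages): with a THP mapped
 * at 0x600000, has_large_page(mm, 0x5ff000, 0x801000) returns 0x600000,
 * and the caller falls back to a full TLB flush rather than issuing
 * per-page invlpg across the huge page.
 */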

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	unsigned act_entries, tlb_entries = 0;

	preempt_disable();
	if (current->active_mm != mm)
		goto flush_all;

	if (!current->mm) {
		leave_mm(smp_processor_id());
		goto flush_all;
	}

	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
					|| vmflag == VM_HUGETLB) {
		local_flush_tlb();
		goto flush_all;
	}

	/* On modern CPUs the last level TLB serves both data and instructions */
	if (vmflag & VM_EXEC)
		tlb_entries = tlb_lli_4k[ENTRIES];
	else
		tlb_entries = tlb_lld_4k[ENTRIES];

	/* Assume all of the TLB entries are occupied by this task */
	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;

	/* tlb_flushall_shift is the balance point, details in the commit log */
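	/*
	 * Worked example with illustrative numbers: for act_entries = 512
	 * and tlb_flushall_shift = 5, a range larger than 512 >> 5 = 16
	 * pages takes the full flush; a smaller one is flushed page by
	 * page with invlpg.
	 */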
	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
		local_flush_tlb();
	else {
		if (has_large_page(mm, start, end)) {
			local_flush_tlb();
			goto flush_all;
		}
		/* flush the range page by page with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE)
			__flush_tlb_single(addr);

		if (cpumask_any_but(mm_cpumask(mm),
				smp_processor_id()) < nr_cpu_ids)
			flush_tlb_others(mm_cpumask(mm), mm, start, end);
		preempt_enable();
		return;
	}

flush_all:
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}
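
/*
 * Note: generic mm helpers such as ptep_clear_flush() end up here after
 * clearing a PTE; the flush_end value of 0UL encodes the single-page
 * flush in struct flush_tlb_info, as described above.
 */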

static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range page by page with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned act_entries;
	struct flush_tlb_info info;

	/* On modern CPUs the last level TLB serves both data and instructions */
	act_entries = tlb_lld_4k[ENTRIES];

	/* Balance as for a user space task's flush, a bit conservatively */
	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
	    (end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	else {
		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}
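
/*
 * Example use (hypothetical range): the vmalloc code flushes stale kernel
 * translations after tearing down lazily-unmapped areas, e.g.
 *
 *	flush_tlb_kernel_range(va_start, va_end);
 */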

#ifdef CONFIG_DEBUG_TLBFLUSH
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%hd\n", tlb_flushall_shift);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	s8 shift;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtos8(buf, 0, &shift))
		return -EINVAL;

	if (shift < -1 || shift >= BITS_PER_LONG)
		return -EINVAL;

	tlb_flushall_shift = shift;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __cpuinit create_tlb_flushall_shift(void)
{
	if (cpu_has_invlpg) {
		debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
				    arch_debugfs_dir, NULL, &fops_tlbflush);
	}
	return 0;
}
late_initcall(create_tlb_flushall_shift);
#endif
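
/*
 * With debugfs mounted, the knob above appears as
 * /sys/kernel/debug/x86/tlb_flushall_shift; for example
 *
 *	echo 5 > /sys/kernel/debug/x86/tlb_flushall_shift
 *
 * raises the balance point, and writing -1 disables range flushing so
 * every flush is a full one.
 */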