#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

#ifdef CONFIG_SMP

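/*
 * Argument block handed to flush_tlb_func() on remote CPUs via
 * smp_call_function_many().  flush_start/flush_end bound the virtual
 * address range to invalidate; flush_end == TLB_FLUSH_ALL requests a
 * full flush.  flush_mm is the address space being flushed; the
 * kernel-range path (do_kernel_range_flush()) leaves it unused.
 */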
struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

#endif /* CONFIG_SMP */

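/*
 * switch_mm() is a thin wrapper that disables interrupts around
 * switch_mm_irqs_off(), which does the actual address-space switch.
 */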
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

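/*
 * Switch this CPU to a new address space with interrupts already disabled:
 * update cpu_tlbstate and mm_cpumask, reload CR3 (which also flushes the
 * non-global TLB entries), and reload per-mm CR4 and LDT state as needed.
 */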
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
			 * If our current stack is in vmalloc space and isn't
			 * mapped in the new pgd, we'll double-fault.  Forcibly
			 * map it.
			 */
			unsigned int stack_pgd_index = pgd_index(current_stack_pointer());

			pgd_t *pgd = next->pgd + stack_pgd_index;

			if (unlikely(pgd_none(*pgd)))
				set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
		}

#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif

		cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * Re-load page tables.
		 *
		 * This logic has an ordering constraint:
		 *
		 *  CPU 0: Write to a PTE for 'next'
		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
		 *  CPU 1: set bit 1 in next's mm_cpumask
		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
		 *
		 * We need to prevent an outcome in which CPU 1 observes
		 * the new PTE value and CPU 0 observes bit 1 clear in
		 * mm_cpumask.  (If that occurs, then the IPI will never
		 * be sent, and CPU 0's TLB will contain a stale entry.)
		 *
		 * The bad outcome can occur if either CPU's load is
		 * reordered before that CPU's store, so both CPUs must
		 * execute full barriers to prevent this from happening.
		 *
		 * Thus, switch_mm needs a full barrier between the
		 * store to mm_cpumask and any operation that could load
		 * from next->pgd.  TLB fills are special and can happen
		 * due to instruction fetches or for no reason at all,
		 * and neither LOCK nor MFENCE orders them.
		 * Fortunately, load_cr3() is serializing and gives the
		 * ordering guarantee we need.
		 */
		load_cr3(next->pgd);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never set context.ldt to NULL while the mm still
		 * exists.  That means that next->context.ldt !=
		 * prev->context.ldt, because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
#endif
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here.  Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));

			/*
			 * We were in lazy tlb mode and leave_mm() disabled
			 * tlb flush IPI delivery.  We must reload CR3 to
			 * make sure we don't use freed page tables.
			 *
			 * As above, load_cr3() is serializing and orders TLB
			 * fills with respect to the mm_cpumask write.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_mm_ldt(next);
		}
	}
#endif
}

#ifdef CONFIG_SMP

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func() won't call leave_mm()
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm.  This is not synchronized with
 *	the other cpus, but flush_tlb_func() ignores flush ipis for the
 *	wrong mm, so in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}

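/*
 * Send the flush to every CPU in @cpumask by running flush_tlb_func()
 * there via smp_call_function_many().  On UV systems the mask is first
 * passed to uv_flush_tlb_others(), which may perform the shootdown itself
 * and returns only the CPUs that still need an IPI.
 */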
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	struct flush_tlb_info info;

	if (end == 0)
		end = start + PAGE_SIZE;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(end - start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
					       &info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

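/*
 * Flush all user mappings of current->mm, both locally and on every other
 * CPU that may still hold entries for it.
 */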
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	/* This is an implicit full barrier that synchronizes with switch_mm. */
	local_flush_tlb();

	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

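/*
 * Flush a range of user addresses belonging to @mm.  Ranges larger than
 * tlb_single_page_flush_ceiling pages (and hugetlb ranges) are promoted
 * to a full TLB flush.
 *
 * Illustrative caller, as flush_tlb_range() is typically wired up in
 * asm/tlbflush.h (exact wiring may differ by kernel version):
 *
 *	flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags);
 */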
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm) {
		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if (!current->mm) {
		leave_mm(smp_processor_id());

		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	/*
	 * Both branches below are implicit full barriers (MOV to CR or
	 * INVLPG) that synchronize with switch_mm.
	 */
	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with INVLPG */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}

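/*
 * Flush one user page of @vma's mm: locally if this CPU is using that mm,
 * and by IPI on any other CPUs that are.
 */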
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * Implicit full barrier (INVLPG) that synchronizes
			 * with switch_mm.
			 */
			__flush_tlb_one(start);
		} else {
			leave_mm(smp_processor_id());

			/* Synchronize with switch_mm. */
			smp_mb();
		}
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

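/*
 * Callback run on each CPU by flush_tlb_all() (and by
 * flush_tlb_kernel_range() for large ranges): flush everything, including
 * global pages, and drop back out of lazy TLB mode if we were in it.
 */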
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

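/* Per-CPU callback for flush_tlb_kernel_range(): INVLPG each page in the range. */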
static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with INVLPG */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

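/*
 * Flush a range of kernel addresses on all CPUs, either page by page or,
 * for large ranges, with one full flush per CPU.
 */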
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance against the user-range heuristic above; be a bit conservative. */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;
		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

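/*
 * Debugfs knob for tlb_single_page_flush_ceiling, so the 33-page heuristic
 * above can be tuned at runtime.  Assuming debugfs is mounted in the usual
 * place, something like
 *
 *	echo 0 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *
 * should force flush_tlb_mm_range() to always do full flushes, while a very
 * large value makes it always flush page by page.
 */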
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);

#endif /* CONFIG_SMP */