#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 * TLB flushing, formerly SMP-only
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

void leave_mm(int cpu)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * It's plausible that we're in lazy TLB mode while our mm is init_mm.
	 * If so, our callers still expect us to flush the TLB, but there
	 * aren't any user TLB entries in init_mm to worry about.
	 *
	 * This needs to happen before any other sanity checks due to
	 * intel_idle's shenanigans.
	 */
	if (loaded_mm == &init_mm)
		return;

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();

	switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);

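/*
 * Fully serialized mm switch: just switch_mm_irqs_off() with interrupts
 * disabled around the call.
 */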
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);

	/*
	 * NB: The scheduler will call us with prev == next when
	 * switching from lazy TLB mode to normal mode if active_mm
	 * isn't changing.  When this happens, there is no guarantee
	 * that CR3 (and hence cpu_tlbstate.loaded_mm) matches next.
	 *
	 * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
	 */

	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);

	if (real_prev == next) {
		/*
		 * There's nothing to do: we always keep the per-mm control
		 * regs in sync with cpu_tlbstate.loaded_mm.  Just
		 * sanity-check mm_cpumask.
		 */
		if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(next))))
			cpumask_set_cpu(cpu, mm_cpumask(next));
		return;
	}

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		/*
		 * If our current stack is in vmalloc space and isn't
		 * mapped in the new pgd, we'll double-fault.  Forcibly
		 * map it.
		 */
		unsigned int stack_pgd_index = pgd_index(current_stack_pointer());

		pgd_t *pgd = next->pgd + stack_pgd_index;

		if (unlikely(pgd_none(*pgd)))
			set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
	}

	this_cpu_write(cpu_tlbstate.loaded_mm, next);

	WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	/*
	 * Re-load page tables.
	 *
	 * This logic has an ordering constraint:
	 *
	 *  CPU 0: Write to a PTE for 'next'
	 *  CPU 0: load bit 1 in mm_cpumask.  If nonzero, send IPI.
	 *  CPU 1: set bit 1 in next's mm_cpumask
	 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
	 *
	 * We need to prevent an outcome in which CPU 1 observes
	 * the new PTE value and CPU 0 observes bit 1 clear in
	 * mm_cpumask.  (If that occurs, then the IPI will never
	 * be sent, and CPU 0's TLB will contain a stale entry.)
	 *
	 * The bad outcome can occur if either CPU's load is
	 * reordered before that CPU's store, so both CPUs must
	 * execute full barriers to prevent this from happening.
	 *
	 * Thus, switch_mm needs a full barrier between the
	 * store to mm_cpumask and any operation that could load
	 * from next->pgd.  TLB fills are special and can happen
	 * due to instruction fetches or for no reason at all,
	 * and neither LOCK nor MFENCE orders them.
	 * Fortunately, load_cr3() is serializing and gives the
	 * ordering guarantee we need.
	 */
	load_cr3(next->pgd);

	/*
	 * This gets called via leave_mm() in the idle path where RCU
	 * functions differently.  Tracing normally uses RCU, so we have to
	 * call the tracepoint specially here.
	 */
	trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

	/* Stop flush IPIs for the previous mm */
	WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
		     real_prev != &init_mm);
	cpumask_clear_cpu(cpu, mm_cpumask(real_prev));

	/* Load per-mm CR4 and LDTR state */
	load_mm_cr4(next);
	switch_ldt(real_prev, next);
}

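/*
 * Common flush worker.  Runs on the local CPU with interrupts disabled;
 * @local says whether the request originated on this CPU (for accounting)
 * and @reason is only used for tracing.
 */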
static void flush_tlb_func_common(const struct flush_tlb_info *f,
				  bool local, enum tlb_flush_reason reason)
{
	/* This code cannot presently handle being reentered. */
	VM_WARN_ON(!irqs_disabled());

	if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
		leave_mm(smp_processor_id());
		return;
	}

	if (f->end == TLB_FLUSH_ALL) {
		local_flush_tlb();
		if (local)
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		trace_tlb_flush(reason, TLB_FLUSH_ALL);
	} else {
		unsigned long addr;
		unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;

		addr = f->start;
		while (addr < f->end) {
			__flush_tlb_single(addr);
			addr += PAGE_SIZE;
		}
		if (local)
			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
		trace_tlb_flush(reason, nr_pages);
	}
}

static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
{
	const struct flush_tlb_info *f = info;

	flush_tlb_func_common(f, true, reason);
}

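/*
 * IPI handler for a remote flush request.  If this CPU no longer has the
 * target mm loaded, the flush is unnecessary and we bail out early.
 */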
static void flush_tlb_func_remote(void *info)
{
	const struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}

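/*
 * Ask the CPUs in @cpumask to flush.  On most systems this sends IPIs via
 * smp_call_function_many(); UV systems hand the request to their own
 * broadcast mechanism first and only IPI whatever remains.
 */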
void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (info->end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(info->end - info->start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, info);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func_remote,
					       (void *)info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func_remote,
			       (void *)info, 1);
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

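/*
 * Flush user mappings of @mm in [start, end).  Small ranges are flushed
 * one page at a time; large ranges (or huge-page mappings) fall back to a
 * full TLB flush, which is cheaper than many individual invlpg's.
 */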
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, unsigned long vmflag)
{
	int cpu;

	struct flush_tlb_info info = {
		.mm = mm,
	};

	cpu = get_cpu();

	/* Synchronize with switch_mm. */
	smp_mb();

	/* Should we flush just the requested range? */
	if ((end != TLB_FLUSH_ALL) &&
	    !(vmflag & VM_HUGETLB) &&
	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
		info.start = start;
		info.end = end;
	} else {
		info.start = 0UL;
		info.end = TLB_FLUSH_ALL;
	}

	if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), &info);

	put_cpu();
}

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* Flush the range one page at a time with 'invlpg' */
	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

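/*
 * Flush kernel-space mappings in [start, end) on every CPU.  Uses the same
 * page-count ceiling as user flushes to decide between per-page invlpg and
 * a full flush.
 */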
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance as in a user space task's flush; a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.start = start;
		info.end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

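/*
 * Finish a batch of deferred flushes queued by the unmap/reclaim code:
 * flush the local CPU directly if it is in the batch's cpumask, send the
 * rest through flush_tlb_others(), then reset the mask.
 */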
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	struct flush_tlb_info info = {
		.mm = NULL,
		.start = 0UL,
		.end = TLB_FLUSH_ALL,
	};

	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, &batch->cpumask)) {
		VM_WARN_ON(irqs_disabled());
		local_irq_disable();
		flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
		local_irq_enable();
	}

	if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&batch->cpumask, &info);

	cpumask_clear(&batch->cpumask);

	put_cpu();
}

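/*
 * debugfs knob (tlb_single_page_flush_ceiling in the arch directory of the
 * debugfs mount) so the invlpg-vs-full-flush crossover point can be read
 * and tuned at run time.
 */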
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);