/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cpc.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

cpumask_t cpu_callin_map;		/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

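/* Record cpu as a sibling of every set-up CPU sharing its package and core. */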
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpu_data[cpu].package == cpu_data[i].package &&
			    cpu_data[cpu].core == cpu_data[i].core) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
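	/* First pass: keep exactly one representative CPU per online core. */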
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpu_data[i].package == cpu_data[k].package &&
			    cpu_data[i].core == cpu_data[k].core)
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

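	/* Second pass: drop each CPU's own siblings, leaving only foreign cores. */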
	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

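	/*
	 * A powered-down core can't take the IPI. Where a Cluster Power
	 * Controller is present, keep issuing PWRUP commands to each target
	 * CPU's core (other than our own) until it joins cpu_coherent_mask.
	 */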
	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			core = cpu_data[cpu].core;

			if (core == current_cpu_data.core)
				continue;

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other(core, 0);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}


static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

static void smp_ipi_init_one(unsigned int virq,
			     struct irqaction *action)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = setup_irq(virq, action);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which only use IPI domains some of the time,
	 * depending upon configuration we don't know until runtime. An
	 * example is Malta where we may compile in support for GIC & the
	 * MT ASE, but run on a system which has multiple VPEs in a single
	 * core and doesn't include a GIC. Until all IPI implementations
	 * have been converted to use IPI domains the best we can do here
	 * is to return & hope some other code sets up the IPIs.
	 */
	if (!ipidomain)
		return 0;

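	/*
	 * Reserve two IPIs for every CPU in the mask: one for function
	 * calls and one for scheduler wakeups.
	 */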
	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

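	/*
	 * A per-CPU IPI domain hands out one virq per CPU, so each needs
	 * its own handler; otherwise a single virq covers the whole mask.
	 */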
	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, &irq_call);
			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
		}
	} else {
		smp_ipi_init_one(call_virq, &irq_call);
		smp_ipi_init_one(sched_virq, &irq_resched);
	}

	return 0;
}

int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			remove_irq(call_virq + cpu, &irq_call);
			remove_irq(sched_virq + cpu, &irq_resched);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);
	return 0;
}


static int __init mips_smp_ipi_init(void)
{
	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

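	/* Signal __cpu_up(), which polls cpu_callin_map, that this CPU is up. */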
	cpumask_set_cpu(cpu, &cpu_callin_map);
	synchronise_count_slave(cpu);

	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	cpumask_set_cpu(0, &cpu_callin_map);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/*
	 * Trust is futile. We should really have timeouts ...
	 */
	while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
		udelay(100);
		schedule();
	}

	synchronise_count_master(cpu);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

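		/*
		 * mm is used only by the current thread: just zap the
		 * context on the other CPUs so switch_mm() allocates a
		 * new ASID there instead of sending an IPI.
		 */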
		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable, otherwise we must invalidate
			 * the ASID without it appearing to has_valid_asid() as
			 * if the mm has been completely unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = !exec;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate ASID without it appearing to
			 * has_valid_asid() as if mm has been completely unused
			 * by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 1;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
	int i;
	int cpu = smp_processor_id();

	dump_ipi_function_ptr = dump_ipi_callback;
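	/* Publish the callback pointer before any CPU can take the IPI. */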
	smp_mb();
	for_each_online_cpu(i)
		if (i != cpu)
			mp_ops->send_ipi_single(i, SMP_DUMP);

}
EXPORT_SYMBOL(dump_send_ipi);
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	struct call_single_data *csd;
	int cpu;

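	/*
	 * Only queue the csd when no broadcast is already in flight for
	 * that CPU; the count is reset in tick_broadcast_callee().
	 */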
	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();
	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
	struct call_single_data *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */