blob: b6d3274ab4d71cb4068923b03b94ef9c98f3bc62 [file] [log] [blame]
Catalin Marinas08e875c2012-03-05 11:49:30 +00001/*
2 * SMP initialisation and IPI support
3 * Based on arch/arm/kernel/smp.c
4 *
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
Lorenzo Pieralisi0f078332015-05-13 14:12:47 +010020#include <linux/acpi.h>
Catalin Marinas08e875c2012-03-05 11:49:30 +000021#include <linux/delay.h>
22#include <linux/init.h>
23#include <linux/spinlock.h>
24#include <linux/sched.h>
25#include <linux/interrupt.h>
26#include <linux/cache.h>
27#include <linux/profile.h>
28#include <linux/errno.h>
29#include <linux/mm.h>
30#include <linux/err.h>
31#include <linux/cpu.h>
32#include <linux/smp.h>
33#include <linux/seq_file.h>
34#include <linux/irq.h>
35#include <linux/percpu.h>
36#include <linux/clockchips.h>
37#include <linux/completion.h>
38#include <linux/of.h>
Larry Basseleb631bb2014-05-12 16:48:51 +010039#include <linux/irq_work.h>
Catalin Marinas08e875c2012-03-05 11:49:30 +000040
Andre Przywarae039ee42014-11-14 15:54:08 +000041#include <asm/alternative.h>
Catalin Marinas08e875c2012-03-05 11:49:30 +000042#include <asm/atomic.h>
43#include <asm/cacheflush.h>
Mark Rutlanddf857412014-07-16 16:32:44 +010044#include <asm/cpu.h>
Catalin Marinas08e875c2012-03-05 11:49:30 +000045#include <asm/cputype.h>
Mark Rutlandcd1aebf2013-10-24 20:30:15 +010046#include <asm/cpu_ops.h>
Catalin Marinas08e875c2012-03-05 11:49:30 +000047#include <asm/mmu_context.h>
Ganapatrao Kulkarni1a2db302016-04-08 15:50:27 -070048#include <asm/numa.h>
Catalin Marinas08e875c2012-03-05 11:49:30 +000049#include <asm/pgtable.h>
50#include <asm/pgalloc.h>
51#include <asm/processor.h>
Javi Merino4c7aa002012-08-29 09:47:19 +010052#include <asm/smp_plat.h>
Catalin Marinas08e875c2012-03-05 11:49:30 +000053#include <asm/sections.h>
54#include <asm/tlbflush.h>
55#include <asm/ptrace.h>
Jonas Rabenstein377bcff2015-07-29 12:07:57 +010056#include <asm/virt.h>
Lingutla Chandrasekharbe48b072017-09-25 19:07:12 +053057#include <soc/qcom/minidump.h>
Catalin Marinas08e875c2012-03-05 11:49:30 +000058
Nicolas Pitre45ed6952014-07-25 16:05:32 -040059#define CREATE_TRACE_POINTS
60#include <trace/events/ipi.h>
61
/*
 * Per-CPU copy of the logical CPU id; initialised for every possible CPU
 * in smp_prepare_cpus() below.
 */
DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
64
/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 *
 * secondary_data is filled in by __cpu_up() and cleaned to the PoC so a
 * secondary can read it before enabling its MMU/caches.
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;
Catalin Marinas08e875c2012-03-05 11:49:30 +000073
/*
 * Inter-processor interrupt message types.
 * NOTE: must be kept in sync with the ipi_types[] name table below.
 */
enum ipi_msg_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_TIMER,
        IPI_IRQ_WORK,
        IPI_WAKEUP,
        IPI_CPU_BACKTRACE,
};
83
#ifdef CONFIG_ARM64_VHE

/* Records whether the boot CPU brought the kernel up at EL2 (HYP). */
static bool boot_cpu_hyp_mode;

/* Snapshot the boot CPU's exception level; called once from boot-CPU init. */
static inline void save_boot_cpu_run_el(void)
{
        boot_cpu_hyp_mode = is_kernel_in_hyp_mode();
}

static inline bool is_boot_cpu_in_hyp_mode(void)
{
        return boot_cpu_hyp_mode;
}

/*
 * Verify that a secondary CPU is running the kernel at the same
 * EL as that of the boot CPU.
 */
void verify_cpu_run_el(void)
{
        bool secondary_el2 = is_kernel_in_hyp_mode();
        bool primary_el2 = is_boot_cpu_in_hyp_mode();

        /* Either all CPUs run at EL2 or none do; a mix is fatal. */
        if (secondary_el2 != primary_el2) {
                pr_crit("CPU%d: mismatched Exception Level(EL%d) with boot CPU(EL%d)\n",
                                        smp_processor_id(),
                                        secondary_el2 ? 2 : 1,
                                        primary_el2 ? 2 : 1);
                cpu_panic_kernel();
        }
}

#else
static inline void save_boot_cpu_run_el(void) {}
#endif
120
/*
 * Forward declaration: the real op_cpu_kill() lives in the
 * CONFIG_HOTPLUG_CPU section below. Without hotplug support there is no
 * mechanism for killing a CPU, so report the operation as unimplemented.
 */
#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
        return -ENOSYS;
}
#endif
129
130
Catalin Marinas08e875c2012-03-05 11:49:30 +0000131/*
132 * Boot a secondary CPU, and assign it the specified idle task.
133 * This also gives us the initial stack to use for this CPU.
134 */
Paul Gortmakerb8c64532013-06-18 10:18:31 -0400135static int boot_secondary(unsigned int cpu, struct task_struct *idle)
Catalin Marinas08e875c2012-03-05 11:49:30 +0000136{
Mark Rutland652af892013-10-24 20:30:16 +0100137 if (cpu_ops[cpu]->cpu_boot)
138 return cpu_ops[cpu]->cpu_boot(cpu);
Catalin Marinas08e875c2012-03-05 11:49:30 +0000139
Mark Rutland652af892013-10-24 20:30:16 +0100140 return -EOPNOTSUPP;
Catalin Marinas08e875c2012-03-05 11:49:30 +0000141}
142
/* Signalled by a secondary in secondary_start_kernel(); waited on in __cpu_up(). */
static DECLARE_COMPLETION(cpu_running);
/*
 * Set when a secondary lacks 52-bit VA support — read in __cpu_up();
 * NOTE(review): the writer is outside this file's visible scope,
 * presumably early boot assembly — confirm.
 */
bool va52mismatch __ro_after_init;
Catalin Marinas08e875c2012-03-05 11:49:30 +0000145
/*
 * Bring CPU @cpu online using @idle as its idle task. Returns 0 on
 * success; on failure, decodes the boot-status word to report how far
 * the secondary got before dying or getting stuck.
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int ret;
        long status;

        /*
         * We need to tell the secondary core where to find its stack and the
         * page tables.
         */
        secondary_data.task = idle;
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        /* The secondary starts with its MMU off; record that state first. */
        update_cpu_boot_status(CPU_MMU_OFF);
        /*
         * Clean secondary_data to the point of coherency — the secondary
         * reads it before its MMU and caches are enabled.
         */
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                /*
                 * CPU was successfully started, wait for it to come online or
                 * time out.
                 */
                wait_for_completion_timeout(&cpu_running,
                                            msecs_to_jiffies(1000));

                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);

                        if (IS_ENABLED(CONFIG_ARM64_52BIT_VA) && va52mismatch)
                                pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);

                        ret = -EIO;
                }
        } else {
                pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
                return ret;
        }

        secondary_data.task = NULL;
        secondary_data.stack = NULL;
        status = READ_ONCE(secondary_data.status);
        if (ret && status) {

                /*
                 * CPU_MMU_OFF means the secondary never got far enough to
                 * update secondary_data.status itself; consult the early
                 * (pre-MMU) status word instead.
                 */
                if (status == CPU_MMU_OFF)
                        status = READ_ONCE(__early_cpu_boot_status);

                switch (status) {
                default:
                        pr_err("CPU%u: failed in unknown state : 0x%lx\n",
                                        cpu, status);
                        break;
                case CPU_KILL_ME:
                        if (!op_cpu_kill(cpu)) {
                                pr_crit("CPU%u: died during early boot\n", cpu);
                                break;
                        }
                        pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
                        /* Fall through: treat an unkillable CPU as stuck. */
                case CPU_STUCK_IN_KERNEL:
                        pr_crit("CPU%u: is stuck in kernel\n", cpu);
                        cpus_stuck_in_kernel++;
                        break;
                case CPU_PANIC_KERNEL:
                        panic("CPU%u detected unsupported configuration\n", cpu);
                }
        }

        return ret;
}
216
/*
 * This is the secondary CPU boot entry. We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 *
 * The statement order below is significant: capabilities are checked
 * before the CPU is marked online, and CPU_BOOT_SUCCESS is recorded
 * before cpu_running is completed.
 * NOTE(review): "notrace" presumably because the tracing infrastructure
 * is not yet usable this early on the secondary — confirm.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;

        /* task_cpu(current) was chosen by the boot CPU; adopt it. */
        cpu = task_cpu(current);
        set_my_cpu_offset(per_cpu_offset(cpu));

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;

        pr_debug("CPU%u: Booted secondary processor\n", cpu);

        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_uninstall_idmap();

        preempt_disable();
        trace_hardirqs_off();

        /*
         * If the system has established the capabilities, make sure
         * this CPU ticks all of those. If it doesn't, the CPU will
         * fail to come online.
         */
        check_local_cpu_capabilities();

        if (cpu_ops[cpu]->cpu_postboot)
                cpu_ops[cpu]->cpu_postboot();

        /*
         * Log the CPU info before it is marked online and might get read.
         */
        cpuinfo_store_cpu();

        /*
         * Enable GIC and timers.
         */
        notify_cpu_starting(cpu);

        store_cpu_topology(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue. Wait for
         * the CPU migration code to notice that the CPU is online
         * before we continue.
         */
        pr_info("CPU%u: Booted secondary processor [%08x]\n",
                                         cpu, read_cpuid_id());
        update_cpu_boot_status(CPU_BOOT_SUCCESS);
        set_cpu_online(cpu, true);
        complete(&cpu_running);

        local_irq_enable();
        local_async_enable();

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
288
Mark Rutland9327e2c2013-10-24 20:30:18 +0100289#ifdef CONFIG_HOTPLUG_CPU
290static int op_cpu_disable(unsigned int cpu)
291{
292 /*
293 * If we don't have a cpu_die method, abort before we reach the point
294 * of no return. CPU0 may not have an cpu_ops, so test for it.
295 */
296 if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
297 return -EOPNOTSUPP;
298
299 /*
300 * We may need to abort a hot unplug for some other mechanism-specific
301 * reason.
302 */
303 if (cpu_ops[cpu]->cpu_disable)
304 return cpu_ops[cpu]->cpu_disable(cpu);
305
306 return 0;
307}
308
/*
 * __cpu_disable runs on the processor to be shutdown.
 * Returns 0 on success; a non-zero return aborts the hot-unplug.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        /* Last chance to veto the unplug before the point of no return. */
        ret = op_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline. Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        irq_migrate_all_off_this_cpu();

        return 0;
}
334
Ashwin Chaugulec814ca02014-05-07 10:18:36 -0400335static int op_cpu_kill(unsigned int cpu)
336{
337 /*
338 * If we have no means of synchronising with the dying CPU, then assume
339 * that it is really dead. We can only wait for an arbitrary length of
340 * time and hope that it's dead, so let's skip the wait and just hope.
341 */
342 if (!cpu_ops[cpu]->cpu_kill)
Mark Rutland6b99c68c2015-04-20 17:55:30 +0100343 return 0;
Ashwin Chaugulec814ca02014-05-07 10:18:36 -0400344
345 return cpu_ops[cpu]->cpu_kill(cpu);
346}
347
/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
        int err;

        /* Wait up to 5 seconds for the dying CPU to report its death. */
        if (!cpu_wait_death(cpu, 5)) {
                pr_crit("CPU%u: cpu didn't die\n", cpu);
                return;
        }
        pr_debug("CPU%u: shutdown\n", cpu);

        /*
         * Now that the dying CPU is beyond the point of no return w.r.t.
         * in-kernel synchronisation, try to get the firmware to help us to
         * verify that it has really left the kernel before we consider
         * clobbering anything it might still be using.
         */
        err = op_cpu_kill(cpu);
        if (err)
                pr_warn("CPU%d may not have shut down cleanly: %d\n",
                        cpu, err);
}
373
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        (void)cpu_report_death();

        /*
         * Actually shutdown the CPU. This must never fail. The specific hotplug
         * mechanism must perform all required cache maintenance to ensure that
         * no dirty lines are lost in the process of shutting down the CPU.
         */
        cpu_ops[cpu]->cpu_die(cpu);

        /* cpu_die hooks must not return; reaching here is a fatal bug. */
        BUG();
}
402#endif
403
/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
        int cpu = smp_processor_id();

        pr_crit("CPU%d: will not boot\n", cpu);

        /* Mark this CPU absent */
        set_cpu_present(cpu, 0);

#ifdef CONFIG_HOTPLUG_CPU
        /* Tell __cpu_up() (via the boot status word) to try killing us. */
        update_cpu_boot_status(CPU_KILL_ME);
        /* Check if we can park ourselves */
        if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
                cpu_ops[cpu]->cpu_die(cpu);
#endif
        /* Only reached if we could not die: report ourselves stuck and spin. */
        update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

        cpu_park_loop();
}
427
Jonas Rabenstein377bcff2015-07-29 12:07:57 +0100428static void __init hyp_mode_check(void)
429{
430 if (is_hyp_mode_available())
431 pr_info("CPU: All CPU(s) started at EL2\n");
432 else if (is_hyp_mode_mismatched())
433 WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
434 "CPU: CPUs started in inconsistent modes");
435 else
436 pr_info("CPU: All CPU(s) started at EL1\n");
437}
438
/*
 * Called once all secondaries have been brought up. Ordering matters:
 * system-wide features are finalised before the EL consistency check and
 * before alternative instruction sequences are patched in.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
        pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
        setup_cpu_features();
        hyp_mode_check();
        apply_alternatives_all();
}
446
/*
 * Early boot-CPU setup: establish the per-CPU offset, then the pieces of
 * infrastructure (static keys, cpuinfo, run-EL snapshot, errata checks)
 * that later CPU bring-up depends on — in that order.
 */
void __init smp_prepare_boot_cpu(void)
{
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
        /*
         * Initialise the static keys early as they may be enabled by the
         * cpufeature code.
         */
        jump_label_init();
        cpuinfo_store_boot_cpu();
        save_boot_cpu_run_el();
        /*
         * Run the errata work around checks on the boot CPU, once we have
         * initialised the cpu feature infrastructure from
         * cpuinfo_store_boot_cpu() above.
         */
        update_cpu_errata_workarounds();
}
464
Lorenzo Pieralisi0f078332015-05-13 14:12:47 +0100465static u64 __init of_get_cpu_mpidr(struct device_node *dn)
466{
467 const __be32 *cell;
468 u64 hwid;
469
470 /*
471 * A cpu node with missing "reg" property is
472 * considered invalid to build a cpu_logical_map
473 * entry.
474 */
475 cell = of_get_property(dn, "reg", NULL);
476 if (!cell) {
477 pr_err("%s: missing reg property\n", dn->full_name);
478 return INVALID_HWID;
479 }
480
481 hwid = of_read_number(cell, of_n_addr_cells(dn));
482 /*
483 * Non affinity bits must be set to 0 in the DT
484 */
485 if (hwid & ~MPIDR_HWID_BITMASK) {
486 pr_err("%s: invalid reg property\n", dn->full_name);
487 return INVALID_HWID;
488 }
489 return hwid;
490}
491
492/*
493 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
494 * entries and check for duplicates. If any is found just ignore the
495 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
496 * matching valid MPIDR values.
497 */
498static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
499{
500 unsigned int i;
501
502 for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
503 if (cpu_logical_map(i) == hwid)
504 return true;
505 return false;
506}
507
Catalin Marinas08e875c2012-03-05 11:49:30 +0000508/*
Lorenzo Pieralisi819a8822015-05-13 14:12:46 +0100509 * Initialize cpu operations for a logical cpu and
510 * set it in the possible mask on success
511 */
512static int __init smp_cpu_setup(int cpu)
513{
514 if (cpu_read_ops(cpu))
515 return -ENODEV;
516
517 if (cpu_ops[cpu]->cpu_init(cpu))
518 return -ENODEV;
519
520 set_cpu_possible(cpu, true);
521
522 return 0;
523}
524
/* Set once the boot CPU's MPIDR has been matched in the DT or MADT. */
static bool bootcpu_valid __initdata;
/* Next cpu_logical_map slot to fill; slot 0 is reserved for the boot CPU. */
static unsigned int cpu_count = 1;
527
#ifdef CONFIG_ACPI
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
        u64 hwid = processor->arm_mpidr;

        if (!(processor->flags & ACPI_MADT_ENABLED)) {
                pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
                return;
        }

        /* Non-affinity bits must be clear and the id itself must be valid. */
        if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
                pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
                return;
        }

        if (is_mpidr_duplicate(cpu_count, hwid)) {
                pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
                return;
        }

        /* Check if GICC structure of boot CPU is available in the MADT */
        if (cpu_logical_map(0) == hwid) {
                if (bootcpu_valid) {
                        pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
                               hwid);
                        return;
                }
                /* Slot 0 was pre-assigned; only record validity and NUMA node. */
                bootcpu_valid = true;
                early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
                return;
        }

        if (cpu_count >= NR_CPUS)
                return;

        /* map the logical cpu id to cpu MPIDR */
        cpu_logical_map(cpu_count) = hwid;

        /*
         * Set-up the ACPI parking protocol cpu entries
         * while initializing the cpu_logical_map to
         * avoid parsing MADT entries multiple times for
         * nothing (ie a valid cpu_logical_map entry should
         * contain a valid parking protocol data set to
         * initialize the cpu if the parking protocol is
         * the only available enable method).
         */
        acpi_set_mailbox_entry(cpu_count, processor);

        early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count, hwid));

        cpu_count++;
}
588
/*
 * MADT walk callback: validate one GICC subtable and hand it to
 * acpi_map_gic_cpu_interface(). Returning -EINVAL aborts the table walk.
 */
static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
                             const unsigned long end)
{
        struct acpi_madt_generic_interrupt *processor;

        processor = (struct acpi_madt_generic_interrupt *)header;
        if (BAD_MADT_GICC_ENTRY(processor, end))
                return -EINVAL;

        acpi_table_print_madt_entry(header);

        acpi_map_gic_cpu_interface(processor);

        return 0;
}
#else
/* Without ACPI there is no MADT to parse; make the walk a no-op. */
#define acpi_table_parse_madt(...)      do { } while (0)
#endif
/* IPI raise hook; installed by the irqchip driver via set_smp_cross_call(). */
void (*__smp_cross_call)(const struct cpumask *, unsigned int);
/*
 * Set for a CPU in smp_cross_call_common() before an IPI is raised to it;
 * presumably cleared by the IPI receive path, which is outside this view —
 * confirm.
 */
DEFINE_PER_CPU(bool, pending_ipi);
610
/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 *
 * Note: cpu_count is advanced (via the "next" label) even for invalid or
 * duplicate nodes; only the boot CPU's node skips the increment.
 */
static void __init of_parse_and_init_cpus(void)
{
        struct device_node *dn = NULL;

        while ((dn = of_find_node_by_type(dn, "cpu"))) {
                u64 hwid = of_get_cpu_mpidr(dn);

                if (hwid == INVALID_HWID)
                        goto next;

                if (is_mpidr_duplicate(cpu_count, hwid)) {
                        pr_err("%s: duplicate cpu reg properties in the DT\n",
                                dn->full_name);
                        goto next;
                }

                /*
                 * The numbering scheme requires that the boot CPU
                 * must be assigned logical id 0. Record it so that
                 * the logical map built from DT is validated and can
                 * be used.
                 */
                if (hwid == cpu_logical_map(0)) {
                        if (bootcpu_valid) {
                                pr_err("%s: duplicate boot cpu reg property in DT\n",
                                        dn->full_name);
                                goto next;
                        }

                        bootcpu_valid = true;
                        early_map_cpu_to_node(0, of_node_to_nid(dn));

                        /*
                         * cpu_logical_map has already been
                         * initialized and the boot cpu doesn't need
                         * the enable-method so continue without
                         * incrementing cpu.
                         */
                        continue;
                }

                if (cpu_count >= NR_CPUS)
                        goto next;

                pr_debug("cpu logical map 0x%llx\n", hwid);
                cpu_logical_map(cpu_count) = hwid;

                early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
                cpu_count++;
        }
}
Catalin Marinas08e875c2012-03-05 11:49:30 +0000668
/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
        int i;

        if (acpi_disabled)
                of_parse_and_init_cpus();
        else
                /*
                 * do a walk of MADT to determine how many CPUs
                 * we have including disabled CPUs, and get information
                 * we need for SMP init
                 */
                acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                      acpi_parse_gic_cpu_interface, 0);

        if (cpu_count > nr_cpu_ids)
                pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
                        cpu_count, nr_cpu_ids);

        /* Without a validated boot CPU entry the map cannot be trusted. */
        if (!bootcpu_valid) {
                pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
                return;
        }

        /*
         * We need to set the cpu_logical_map entries before enabling
         * the cpus so that cpu processor description entries (DT cpu nodes
         * and ACPI MADT entries) can be retrieved by matching the cpu hwid
         * with entries in cpu_logical_map while initializing the cpus.
         * If the cpu set-up fails, invalidate the cpu_logical_map entry.
         */
        for (i = 1; i < nr_cpu_ids; i++) {
                if (cpu_logical_map(i) != INVALID_HWID) {
                        if (smp_cpu_setup(i))
                                cpu_logical_map(i) = INVALID_HWID;
                }
        }
}
712
/*
 * Prepare the machine for SMP: record boot-CPU topology/NUMA info, seed
 * the per-CPU cpu_number variables, and mark as present every possible
 * CPU whose enable method accepts the cpu_prepare() call.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        int err;
        unsigned int cpu;
        unsigned int this_cpu;

        init_cpu_topology();

        this_cpu = smp_processor_id();
        store_cpu_topology(this_cpu);
        numa_store_cpu_info(this_cpu);

        /*
         * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
         * secondary CPUs present.
         */
        if (max_cpus == 0)
                return;

        /*
         * Initialise the present map (which describes the set of CPUs
         * actually populated at the present time) and release the
         * secondaries from the bootloader.
         */
        for_each_possible_cpu(cpu) {

                per_cpu(cpu_number, cpu) = cpu;

                if (cpu == smp_processor_id())
                        continue;

                /* CPUs without an enable method cannot be brought up. */
                if (!cpu_ops[cpu])
                        continue;

                err = cpu_ops[cpu]->cpu_prepare(cpu);
                if (err)
                        continue;

                set_cpu_present(cpu, true);
                numa_store_cpu_info(cpu);
        }
}
755
/* Register the irqchip's IPI raise callback; used by smp_cross_call(). */
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
        __smp_cross_call = fn;
}
760
/*
 * Human-readable IPI names, indexed by enum ipi_msg_type; used by
 * show_ipi_list() and the IPI tracepoints. Keep in sync with the enum.
 */
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)  [x] = s
        S(IPI_RESCHEDULE, "Rescheduling interrupts"),
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_IRQ_WORK, "IRQ work interrupts"),
        S(IPI_WAKEUP, "CPU wake-up interrupts"),
        S(IPI_CPU_BACKTRACE, "CPU backtrace"),
};
771
/* Emit the ipi_raise tracepoint, then raise the IPI via the irqchip hook. */
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        trace_ipi_raise(target, ipi_types[ipinr]);
        __smp_cross_call(target, ipinr);
}
777
/*
 * Raise IPI @func on every CPU in @cpumask, first flagging each target's
 * pending_ipi so the pending state is visible before the IPI arrives.
 */
static void smp_cross_call_common(const struct cpumask *cpumask,
                                  unsigned int func)
{
        unsigned int cpu;

        for_each_cpu(cpu, cpumask)
                per_cpu(pending_ipi, cpu) = true;

        smp_cross_call(cpumask, func);
}
788
/*
 * Print per-CPU IPI delivery counts (one row per IPI type, one column per
 * online CPU) into the given seq_file, padded to @prec digits.
 */
void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ",
                                   __get_irq_stat(cpu, ipi_irqs[i]));
                seq_printf(p, " %s\n", ipi_types[i]);
        }
}
802
803u64 smp_irq_stat_cpu(unsigned int cpu)
804{
805 u64 sum = 0;
806 int i;
807
808 for (i = 0; i < NR_IPI; i++)
809 sum += __get_irq_stat(cpu, ipi_irqs[i]);
810
811 return sum;
812}
813
/* Raise a function-call IPI on every CPU in @mask. */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call_common(mask, IPI_CALL_FUNC);
}

/* Raise a function-call IPI on a single CPU. */
void arch_send_call_function_single_ipi(int cpu)
{
        smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
}
823
#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
/* Wake CPUs parked via the ACPI parking protocol. */
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
        smp_cross_call_common(mask, IPI_WAKEUP);
}
#endif
830
#ifdef CONFIG_IRQ_WORK
/*
 * Raise IPI_IRQ_WORK on the local CPU to run queued irq_work items.
 * The cross-call hook may not be registered yet during early boot,
 * hence the NULL check before raising the IPI.
 */
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call_common(cpumask_of(smp_processor_id()),
				      IPI_IRQ_WORK);
}
#endif
839
/* Serialises the "CPU%u: stopping" register/stack dumps across CPUs. */
static DEFINE_RAW_SPINLOCK(stop_lock);

/* Per-CPU snapshot of registers taken while handling IPI_CPU_STOP. */
DEFINE_PER_CPU(struct pt_regs, regs_before_stop);
843
/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 *
 * Snapshot this CPU's register state for post-mortem debug, dump it
 * under stop_lock so output from multiple stopping CPUs does not
 * interleave, then park the CPU with interrupts off forever.
 */
static void ipi_cpu_stop(unsigned int cpu, struct pt_regs *regs)
{
	/* Only produce diagnostics while the console is still usable. */
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		per_cpu(regs_before_stop, cpu) = *regs;
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		show_regs(regs);
		dump_stack();
		dump_stack_minidump(regs->sp);
		raw_spin_unlock(&stop_lock);
	}

	/* Signal smp_send_stop(), which polls the *active* mask. */
	set_cpu_active(cpu, false);

	flush_cache_all();
	local_irq_disable();

	/* Park here; this function never returns. */
	while (1)
		cpu_relax();
}
868
/* CPUs still expected to respond to an IPI_CPU_BACKTRACE request. */
static cpumask_t backtrace_mask;
/* Serialises per-CPU backtrace output so dumps do not interleave. */
static DEFINE_RAW_SPINLOCK(backtrace_lock);

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;
874
/*
 * Dump this CPU's stack, then IPI every other online CPU to dump
 * theirs (serviced by ipi_cpu_backtrace()), waiting up to 10 seconds
 * for all of them to acknowledge by clearing their backtrace_mask bit.
 */
static void smp_send_all_cpu_backtrace(void)
{
	unsigned int this_cpu = smp_processor_id();
	int i;

	if (test_and_set_bit(0, &backtrace_flag))
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		return;

	/* Target everyone but ourselves; we dump our own stack directly. */
	cpumask_copy(&backtrace_mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &backtrace_mask);

	pr_info("Backtrace for cpu %d (current):\n", this_cpu);
	dump_stack();

	pr_info("\nsending IPI to all other CPUs:\n");
	if (!cpumask_empty(&backtrace_mask))
		smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);

	/* Wait for up to 10 seconds for all other CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(&backtrace_mask))
			break;
		mdelay(1);
	}

	/* Release the in-progress flag for the next caller. */
	clear_bit(0, &backtrace_flag);
	smp_mb__after_atomic();
}
907
908/*
909 * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
910 */
911static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
912{
913 if (cpumask_test_cpu(cpu, &backtrace_mask)) {
914 raw_spin_lock(&backtrace_lock);
915 pr_warn("IPI backtrace for cpu %d\n", cpu);
916 show_regs(regs);
917 raw_spin_unlock(&backtrace_lock);
918 cpumask_clear_cpu(cpu, &backtrace_mask);
919 }
920}
921
#ifdef CONFIG_SMP
/* Trigger a stack dump on every online CPU, this one included. */
void arch_trigger_all_cpu_backtrace(void)
{
	smp_send_all_cpu_backtrace();
}
#else
/* UP fallback: only this CPU exists, so just dump its own stack. */
void arch_trigger_all_cpu_backtrace(void)
{
	dump_stack();
}
#endif
933
934
/*
 * Main handler for inter-processor interrupts
 *
 * Dispatches on the IPI number, bracketing each known IPI with
 * entry/exit tracepoints and per-CPU accounting, and finally clears
 * this CPU's pending_ipi flag (set by smp_cross_call_common()).
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* Only trace/account IPIs we know about; out-of-range numbers
	 * still reach the "Unknown IPI" path below. */
	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		/* No irq_enter(): scheduler_ipi() is its own entry point. */
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu, regs);	/* parks the CPU; never returns */
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		/* The interrupt itself is the wake-up; just sanity-check
		 * that this CPU really was parked via the protocol. */
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	case IPI_CPU_BACKTRACE:
		ipi_cpu_backtrace(cpu, regs);
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	/* Pairs with smp_cross_call_common(): IPI now handled here. */
	per_cpu(pending_ipi, cpu) = false;
	set_irq_regs(old_regs);
}
1003
/*
 * Raise IPI_RESCHEDULE on @cpu so it re-runs the scheduler.
 * Sending to an offline CPU would be lost, hence the BUG_ON.
 */
void smp_send_reschedule(int cpu)
{
	BUG_ON(cpu_is_offline(cpu));
	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
}
1009
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/*
 * Deliver a broadcast timer tick to @mask; serviced as IPI_TIMER
 * (tick_receive_broadcast()) in handle_IPI().
 */
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call_common(mask, IPI_TIMER);
}
#endif
1016
/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
	unsigned int self = cpu_online(smp_processor_id()) ? 1 : 0;

	return num_online_cpus() - self;
}
1027
/*
 * The number of CPUs in the active mask, not counting this CPU (which
 * may already have been marked inactive, e.g. by ipi_cpu_stop()).
 */
static inline unsigned int num_other_active_cpus(void)
{
	unsigned int self = cpu_active(smp_processor_id()) ? 1 : 0;

	return num_active_cpus() - self;
}
1034
/*
 * Stop all other CPUs via IPI_CPU_STOP, waiting up to one second for
 * them to go down. The wait polls the *active* mask (not online)
 * because ipi_cpu_stop() acknowledges with set_cpu_active(cpu, false).
 */
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_other_online_cpus()) {
		cpumask_t mask;

		/* Target every online CPU except ourselves. */
		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		/* Stay quiet once the system is shutting down/panicking. */
		if (system_state == SYSTEM_BOOTING ||
		    system_state == SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call_common(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;

	while (num_other_active_cpus() && timeout--)
		udelay(1);

	if (num_other_active_cpus())
		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
			   cpumask_pr_args(cpu_online_mask));
}
1061
/*
 * not supported here
 *
 * Changing the profiling-timer rate is not implemented on this
 * architecture; always reject the request.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
James Morse5c492c32016-06-22 10:06:12 +01001069
1070static bool have_cpu_die(void)
1071{
1072#ifdef CONFIG_HOTPLUG_CPU
1073 int any_cpu = raw_smp_processor_id();
1074
Mark Salter982d8d92017-03-24 09:53:56 -04001075 if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
James Morse5c492c32016-06-22 10:06:12 +01001076 return true;
1077#endif
1078 return false;
1079}
1080
1081bool cpus_are_stuck_in_kernel(void)
1082{
1083 bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
1084
1085 return !!cpus_stuck_in_kernel || smp_spin_tables;
1086}