/*
 * Xtensa SMP support functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 - 2013 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com>
 * Pete Delaney <piet@tensilica.com>
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/thread_info.h>

#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
#include <asm/platform.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
#  error "The S32C1I option is required for SMP."
# endif
#endif

static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size);
static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size);

/* IPI (Inter Processor Interrupt) */

#define IPI_IRQ	0

static irqreturn_t ipi_interrupt(int irq, void *dev_id);
static struct irqaction ipi_irqaction = {
	.handler = ipi_interrupt,
	.flags = IRQF_PERCPU,
	.name = "ipi",
};

void ipi_init(void)
{
	unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
	setup_irq(irq, &ipi_irqaction);
}

static inline unsigned int get_core_count(void)
{
	/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
	unsigned int syscfgid = get_er(SYSCFGID);
	return ((syscfgid >> 18) & 0xf) + 1;
}

static inline int get_core_id(void)
{
	/* Bits 0...18 of SYSCFGID contain the core id */
	unsigned int core_id = get_er(SYSCFGID);
	return core_id & 0x3fff;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned i;

	for_each_possible_cpu(i)
		set_cpu_present(i, true);
}

void __init smp_init_cpus(void)
{
	unsigned i;
	unsigned int ncpus = get_core_count();
	unsigned int core_id = get_core_id();

	pr_info("%s: Core Count = %d\n", __func__, ncpus);
	pr_info("%s: Core Id = %d\n", __func__, core_id);

	if (ncpus > NR_CPUS) {
		ncpus = NR_CPUS;
		pr_info("%s: limiting core count to %d\n", __func__, ncpus);
	}

	for (i = 0; i < ncpus; ++i)
		set_cpu_possible(i, true);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();
	BUG_ON(cpu != 0);
	cpu_asid_cache(cpu) = ASID_USER_FIRST;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);

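/*
 * Entry point for a secondary core after low-level startup: set up the MMU
 * and exception handling, attach to the shared init_mm, calibrate the delay
 * loop, initialize per-CPU IRQs and the local timer, mark the CPU online,
 * signal __cpu_up() via cpu_running and enter the idle loop.
 */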
void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			__func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		__func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */

	secondary_trap_init();

	/* All kernel threads share the same mm context. */

	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	preempt_disable();
	trace_hardirqs_off();

	calibrate_delay();

	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();

	complete(&cpu_running);

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

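/*
 * mx_cpu_start()/mx_cpu_stop() are run on CPU 0 via
 * smp_call_function_single() and clear or set the target CPU's bit in the
 * MPSCORE run-stall mask, releasing or stalling that core.
 */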
static void mx_cpu_start(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
			__func__, cpu, run_stall_mask, get_er(MPSCORE));
}

static void mx_cpu_stop(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask | (1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
			__func__, cpu, run_stall_mask, get_er(MPSCORE));
}

#ifdef CONFIG_HOTPLUG_CPU
unsigned long cpu_start_id __cacheline_aligned;
#endif
unsigned long cpu_start_ccount;

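/*
 * Boot handshake with a secondary core: publish the target CPU number in
 * cpu_start_id (hotplug only), unstall the core via mx_cpu_start() on CPU 0,
 * then hand it a non-zero ccount through cpu_start_ccount and wait for the
 * value to be cleared by the secondary's low-level startup (see the memw
 * pairing with .Lboot_secondary noted below).  The exchange is done twice;
 * if the value is not cleared before the timeout, the core is stalled again
 * and -EIO is returned.
 */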
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	WRITE_ONCE(cpu_start_id, cpu);
	/* Pairs with the third memw in the cpu_restart */
	mb();
	system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
					     sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);

		WRITE_ONCE(cpu_start_ccount, ccount);

		do {
			/*
			 * Pairs with the first two memws in the
			 * .Lboot_secondary.
			 */
			mb();
			ccount = READ_ONCE(cpu_start_ccount);
		} while (ccount && time_before(jiffies, timeout));

		if (ccount) {
			smp_call_function_single(0, mx_cpu_stop,
						 (void *)cpu, 1);
			WRITE_ONCE(cpu_start_ccount, 0);
			return -EIO;
		}
	}
	return 0;
}

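/*
 * Bring up one secondary CPU: pass the idle thread's stack to the low-level
 * boot code through start_info, kick the core with boot_secondary() and wait
 * up to a second for it to mark itself online.
 */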
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret = 0;

	if (cpu_asid_cache(cpu) == 0)
		cpu_asid_cache(cpu) = ASID_USER_FIRST;

	start_info.stack = (unsigned long)task_pt_regs(idle);
	wmb();

	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
			__func__, cpu, idle, start_info.stack);

	init_completion(&cpu_running);
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		wait_for_completion_timeout(&cpu_running,
				msecs_to_jiffies(1000));
		if (!cpu_online(cpu))
			ret = -EIO;
	}

	if (ret)
		pr_err("CPU %u failed to boot\n", cpu);

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	local_flush_cache_all();
	local_flush_tlb_all();
	invalidate_page_directory();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static void platform_cpu_kill(unsigned int cpu)
{
	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
}

/*
 * Called on the thread which is asking for a CPU to be shut down;
 * waits until shutdown has completed, or the wait times out.
 */
void __cpu_die(unsigned int cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	while (time_before(jiffies, timeout)) {
		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
					       sizeof(cpu_start_id));
		/* Pairs with the second memw in the cpu_restart */
		mb();
		if (READ_ONCE(cpu_start_id) == -cpu) {
			platform_cpu_kill(cpu);
			return;
		}
	}
	pr_err("CPU%u: unable to kill\n", cpu);
}

void arch_cpu_idle_dead(void)
{
	cpu_die();
}
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	idle_task_exit();
	local_irq_disable();
	__asm__ __volatile__(
			"	movi	a2, cpu_restart\n"
			"	jx	a2\n");
}

#endif /* CONFIG_HOTPLUG_CPU */

enum ipi_msg_type {
	IPI_RESCHEDULE = 0,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

static const struct {
	const char *short_text;
	const char *long_text;
} ipi_text[] = {
	{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
	{ .short_text = "CAL", .long_text = "Function call interrupts" },
	{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};

struct ipi_data {
	unsigned long ipi_count[IPI_MAX];
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);

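/*
 * An IPI is sent by writing a bitmask of destination cores to the MIPISET
 * register of the corresponding message type; the sending CPU is excluded
 * from the mask.
 */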
static void send_ipi_message(const struct cpumask *callmask,
		enum ipi_msg_type msg_id)
{
	int index;
	unsigned long mask = 0;

	for_each_cpu(index, callmask)
		if (index != smp_processor_id())
			mask |= 1 << index;

	set_er(mask, MIPISET(msg_id));
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	send_ipi_message(&targets, IPI_CPU_STOP);
}

static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);
	machine_halt();
}

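/*
 * IPI handler: read the pending message bits from this CPU's MIPICAUSE
 * register, acknowledge each one by writing it back, update the per-CPU
 * statistics, then dispatch to the scheduler, the generic SMP call handler
 * or the CPU-stop path.
 */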
irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	unsigned int msg;
	unsigned i;

	msg = get_er(MIPICAUSE(cpu));
	for (i = 0; i < IPI_MAX; i++)
		if (msg & (1 << i)) {
			set_er(1 << i, MIPICAUSE(cpu));
			++ipi->ipi_count[i];
		}

	if (msg & (1 << IPI_RESCHEDULE))
		scheduler_ipi();
	if (msg & (1 << IPI_CALL_FUNC))
		generic_smp_call_function_interrupt();
	if (msg & (1 << IPI_CPU_STOP))
		ipi_cpu_stop(cpu);

	return IRQ_HANDLED;
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu;
	unsigned i;

	for (i = 0; i < IPI_MAX; ++i) {
		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
		for_each_online_cpu(cpu)
			seq_printf(p, " %10lu",
					per_cpu(ipi_data, cpu).ipi_count[i]);
		seq_printf(p, " %s\n", ipi_text[i].long_text);
	}
}

int setup_profiling_timer(unsigned int multiplier)
{
	pr_debug("setup_profiling_timer %d\n", multiplier);
	return 0;
}

/* TLB flush functions */

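/*
 * The global flush operations below follow a common pattern: arguments,
 * where needed, are packed into a struct flush_data and the corresponding
 * local_* flush is run on every online CPU via on_each_cpu().
 */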
struct flush_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

static void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm(arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}

static void ipi_flush_tlb_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = addr,
	};
	on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}

static void ipi_flush_tlb_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}

static void ipi_flush_tlb_kernel_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
}

/* Cache flush functions */

static void ipi_flush_cache_all(void *arg)
{
	local_flush_cache_all();
}

void flush_cache_all(void)
{
	on_each_cpu(ipi_flush_cache_all, NULL, 1);
}

static void ipi_flush_cache_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_page(struct vm_area_struct *vma,
		unsigned long address, unsigned long pfn)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = address,
		.addr2 = pfn,
	};
	on_each_cpu(ipi_flush_cache_page, &fd, 1);
}

static void ipi_flush_cache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_cache_range, &fd, 1);
}

static void ipi_flush_icache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_icache_range(fd->addr1, fd->addr2);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_icache_range, &fd, 1);
}
EXPORT_SYMBOL(flush_icache_range);

/* ------------------------------------------------------------------------- */

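/*
 * System-wide dcache maintenance over an address range, run on every online
 * CPU; used above to make cpu_start_id visible when booting or killing a
 * secondary core.
 */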
static void ipi_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
}

static void ipi_flush_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
}
621}