blob: 9e0fb9a015d4343fecf562a23ed9190c690d9e30 [file] [log] [blame]
Vitaly Kuznetsova52482d2017-03-14 18:35:45 +01001#include <asm/smp.h>
2
Boris Ostrovsky84d582d2017-04-24 15:04:53 -04003#include <xen/events.h>
4
Vitaly Kuznetsova52482d2017-03-14 18:35:45 +01005#include "xen-ops.h"
6#include "smp.h"
7
8
/*
 * Prepare the boot CPU for SMP bring-up on a Xen HVM guest.
 * Runs the native preparation first, then layers the Xen-specific
 * per-vcpu and spinlock setup on top. Order matters here.
 */
static void __init xen_hvm_smp_prepare_boot_cpu(void)
{
	/* This must run on CPU 0; anything else is a fatal setup error. */
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/*
	 * Setup vcpu_info for boot CPU. Secondary CPUs get their vcpu_info
	 * in xen_cpu_up_prepare_hvm().
	 */
	xen_vcpu_setup(0);

	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the smp bootup up code is activated. Hence we need to set this up
	 * before the core kernel is being patched. Otherwise we will have
	 * only modules patched but not core code.
	 */
	xen_init_spinlocks();
}
28
29static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
30{
31 native_smp_prepare_cpus(max_cpus);
32 WARN_ON(xen_smp_intr_init(0));
33
34 xen_init_lock_cpu(0);
35}
36
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Release the Xen-specific per-CPU resources of an offlined CPU,
 * but only after common_cpu_die() confirms the CPU is really gone.
 */
static void xen_hvm_cpu_die(unsigned int cpu)
{
	if (common_cpu_die(cpu) != 0)
		return;

	xen_smp_intr_free(cpu);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}
#else
/* Without CPU hotplug, offlining a CPU must never happen. */
static void xen_hvm_cpu_die(unsigned int cpu)
{
	BUG();
}
#endif
52
53void __init xen_hvm_smp_init(void)
54{
Boris Ostrovsky84d582d2017-04-24 15:04:53 -040055 if (!xen_have_vector_callback)
56 return;
57
Vitaly Kuznetsova52482d2017-03-14 18:35:45 +010058 smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
59 smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
60 smp_ops.cpu_die = xen_hvm_cpu_die;
61 smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
62 smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
63 smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
64}