x86: fill cpu to apicid and present map in mpparse
This is the way x86_64 does it, and it complements the already-present
patch that does the BIOS cpu-to-apicid mapping here.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
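
For reference, a minimal standalone sketch of the cpu-id selection this
change moves from alloc_cpu_id() in smpboot_32.c into MP_processor_info():
the first cpu not yet in cpu_present_map is chosen, and the boot processor
is forced to logical cpu 0 so the apicid arrays stay ordered by logical cpu
number. The helper name below is illustrative only, not part of the patch;
the cpumask calls are the same ones the diff uses.

	/* Sketch only: pick the logical cpu id for a new MP-table entry. */
	static int pick_logical_cpu(const struct mpc_config_processor *m)
	{
		cpumask_t tmp_map;
		int cpu;

		/* Candidates are all cpus not yet marked present. */
		cpus_complement(tmp_map, cpu_present_map);
		cpu = first_cpu(tmp_map);

		/* The BSP must end up as logical cpu 0. */
		if (m->mpc_cpuflag & CPU_BOOTPROCESSOR)
			cpu = 0;

		return cpu;
	}
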
diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c
index 6ea9716..a0cec74 100644
--- a/arch/x86/kernel/mpparse_32.c
+++ b/arch/x86/kernel/mpparse_32.c
@@ -105,7 +105,8 @@
static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
{
- int ver, apicid;
+ int ver, apicid, cpu;
+ cpumask_t tmp_map;
physid_mask_t phys_cpu;
if (!(m->mpc_cpuflag & CPU_ENABLED)) {
@@ -198,6 +199,16 @@
cpu_set(num_processors, cpu_possible_map);
num_processors++;
+ cpus_complement(tmp_map, cpu_present_map);
+ cpu = first_cpu(tmp_map);
+
+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR)
+ /*
+ * x86_bios_cpu_apicid is required to have processors listed
+ * in same order as logical cpu numbers. Hence the first
+ * entry is BSP, and so on.
+ */
+ cpu = 0;
/*
* Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
@@ -220,12 +231,16 @@
}
/* are we being called early in kernel startup? */
if (x86_cpu_to_apicid_early_ptr) {
+ u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
+
+ cpu_to_apicid[cpu] = m->mpc_apicid;
bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
} else {
- int cpu = num_processors - 1;
+ per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
per_cpu(x86_bios_cpu_apicid, cpu) = m->mpc_apicid;
}
+ cpu_set(cpu, cpu_present_map);
}
static void __init MP_bus_info (struct mpc_config_bus *m)
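
The two branches above exist because MP_processor_info() can run before the
per-cpu areas are set up; until then the mappings are written through the
*_early_ptr arrays and later copied into the per-cpu variables. A rough
sketch of that copy step follows, under the assumption that it happens once
the per-cpu areas exist (the helper name and placement are illustrative; the
real copy is done by the arch setup code, not by this patch):

	/* Sketch: migrate the early apicid maps into per-cpu variables. */
	static void __init copy_early_apicid_maps(void)
	{
		u16 *cpu_to_apicid = x86_cpu_to_apicid_early_ptr;
		u16 *bios_cpu_apicid = x86_bios_cpu_apicid_early_ptr;
		int cpu;

		for_each_possible_cpu(cpu) {
			per_cpu(x86_cpu_to_apicid, cpu) = cpu_to_apicid[cpu];
			per_cpu(x86_bios_cpu_apicid, cpu) = bios_cpu_apicid[cpu];
		}

		/* From here on, MP_processor_info() takes the else branch. */
		x86_cpu_to_apicid_early_ptr = NULL;
		x86_bios_cpu_apicid_early_ptr = NULL;
	}
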
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index bf5c9e9..2fea910 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -525,16 +525,6 @@
#endif /* WAKE_SECONDARY_VIA_INIT */
extern cpumask_t cpu_initialized;
-static inline int alloc_cpu_id(void)
-{
- cpumask_t tmp_map;
- int cpu;
- cpus_complement(tmp_map, cpu_present_map);
- cpu = first_cpu(tmp_map);
- if (cpu >= NR_CPUS)
- return -ENODEV;
- return cpu;
-}
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS];
@@ -605,7 +595,6 @@
irq_ctx_init(cpu);
- per_cpu(x86_cpu_to_apicid, cpu) = apicid;
/*
* This grunge runs the startup process for
* the targeted processor.
@@ -666,10 +655,8 @@
cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
cpu_clear(cpu, cpu_possible_map);
+ per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
cpucount--;
- } else {
- per_cpu(x86_cpu_to_apicid, cpu) = apicid;
- cpu_set(cpu, cpu_present_map);
}
/* mark "stuck" area as not stuck */
@@ -745,6 +732,7 @@
static void __init disable_smp(void)
{
cpu_possible_map = cpumask_of_cpu(0);
+ cpu_present_map = cpumask_of_cpu(0);
smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0);
map_cpu_to_logical_apicid();
@@ -825,7 +813,6 @@
boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
boot_cpu_logical_apicid = logical_smp_processor_id();
- per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;
current_thread_info()->cpu = 0;
@@ -866,8 +853,11 @@
continue;
if (max_cpus <= cpucount+1)
continue;
-
- if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
+ /* Utterly temporary */
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ if (per_cpu(x86_cpu_to_apicid, cpu) == apicid)
+ break;
+ if (do_boot_cpu(apicid, cpu))
printk("CPU #%d not responding - cannot use it.\n",
apicid);
else