MIPS: Cavium: Add CPU hotplugging code.

Implement the cpu_disable/cpu_die plat_smp_ops hooks and play_dead() for
Octeon, so a secondary core can be taken offline and parked back in reset,
and register a CPU notifier that updates the bootloader's boot vector so
an offlined core can later be brought online again.

Thanks to Cavium Inc. for the code contribution and help.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 24e0ad6..0b891a9 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -5,6 +5,7 @@
  *
  * Copyright (C) 2004-2008 Cavium Networks
  */
+#include <linux/cpu.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/smp.h>
@@ -19,10 +20,16 @@
 
 #include <asm/octeon/octeon.h>
 
+#include "octeon_boot.h"
+
 volatile unsigned long octeon_processor_boot = 0xff;
 volatile unsigned long octeon_processor_sp;
 volatile unsigned long octeon_processor_gp;
 
+#ifdef CONFIG_HOTPLUG_CPU
+static unsigned int InitTLBStart_addr;
+#endif
+
 static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
 {
 	const int coreid = cvmx_get_core_num();
@@ -67,8 +74,28 @@
 }
 
 /**
- * Detect available CPUs, populate phys_cpu_present_map
+ * Detect available CPUs, populate cpu_possible_map
  */
+static void octeon_smp_hotplug_setup(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	uint32_t labi_signature;
+
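+	/*
+	 * The bootloader leaves a struct linux_app_boot_info at a fixed
+	 * physical address.  Sanity-check its signature and remember the
+	 * start address used to bring a core back online later.
+	 */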
+	labi_signature =
+		cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+			LABI_ADDR_IN_BOOTLOADER +
+			offsetof(struct linux_app_boot_info,
+				 labi_signature)));
+	if (labi_signature != LABI_SIGNATURE)
+		pr_err("The bootloader version on this board is incorrect\n");
+	InitTLBStart_addr =
+		cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+			LABI_ADDR_IN_BOOTLOADER +
+			offsetof(struct linux_app_boot_info,
+				 InitTLBStart_addr)));
+#endif
+}
+
 static void octeon_smp_setup(void)
 {
 	const int coreid = cvmx_get_core_num();
@@ -91,6 +118,9 @@
 			cpus++;
 		}
 	}
+	cpu_present_map = cpu_possible_map;
+
+	octeon_smp_hotplug_setup();
 }
 
 /**
@@ -128,6 +158,17 @@
 	const int coreid = cvmx_get_core_num();
 	union cvmx_ciu_intx_sum0 interrupt_enable;
 
+#ifdef CONFIG_HOTPLUG_CPU
+	unsigned int cur_exception_base;
+
+	cur_exception_base = cvmx_read64_uint32(
+		CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+			     LABI_ADDR_IN_BOOTLOADER +
+			     offsetof(struct linux_app_boot_info,
+				      cur_exception_base)));
+	/* cur_exception_base is incremented in bootloader after setting */
+	write_c0_ebase((unsigned int)(cur_exception_base - EXCEPTION_BASE_INCR));
+#endif
 	octeon_check_cpu_bist();
 	octeon_init_cvmcount();
 	/*
@@ -199,6 +240,193 @@
 #endif
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* State of each CPU. */
+DEFINE_PER_CPU(int, cpu_state);
+
+extern void fixup_irqs(void);
+
+static DEFINE_SPINLOCK(smp_reserve_lock);
+
+static int octeon_cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+
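+	/* The boot CPU (logical CPU 0) cannot be taken offline. */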
+	if (cpu == 0)
+		return -EBUSY;
+
+	spin_lock(&smp_reserve_lock);
+
+	cpu_clear(cpu, cpu_online_map);
+	cpu_clear(cpu, cpu_callin_map);
+	local_irq_disable();
+	fixup_irqs();
+	local_irq_enable();
+
+	flush_cache_all();
+	local_flush_tlb_all();
+
+	spin_unlock(&smp_reserve_lock);
+
+	return 0;
+}
+
+static void octeon_cpu_die(unsigned int cpu)
+{
+	int coreid = cpu_logical_map(cpu);
+	uint32_t avail_coremask;
+	struct cvmx_bootmem_named_block_desc *block_desc;
+
+#ifdef CONFIG_CAVIUM_OCTEON_WATCHDOG
+	/* Disable the watchdog */
+	cvmx_ciu_wdogx_t ciu_wdog;
+	ciu_wdog.u64 = cvmx_read_csr(CVMX_CIU_WDOGX(cpu));
+	ciu_wdog.s.mode = 0;
+	cvmx_write_csr(CVMX_CIU_WDOGX(cpu), ciu_wdog.u64);
+#endif
+
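+	/*
+	 * Wait until play_dead() on the dying CPU has marked it CPU_DEAD
+	 * before pulling the core back into reset.
+	 */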
+	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
+		cpu_relax();
+
+	/*
+	 * The strategy for getting and setting the available cores mask is
+	 * somewhat involved; it is copied from the bootloader.
+	 */
+	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
+	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
+
+	if (!block_desc) {
+		avail_coremask =
+			cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+				LABI_ADDR_IN_BOOTLOADER +
+				offsetof(struct linux_app_boot_info,
+					 avail_coremask)));
+	} else {	/* alternative, already initialized */
+		avail_coremask =
+			cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+				block_desc->base_addr +
+				AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
+	}
+
+	avail_coremask |= 1 << coreid;
+
+	/* Setting avail_coremask for bootoct binary */
+	if (!block_desc) {
+		cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+						LABI_ADDR_IN_BOOTLOADER +
+						offsetof(struct linux_app_boot_info,
+							 avail_coremask)),
+				   avail_coremask);
+	} else {
+		cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+						block_desc->base_addr +
+						AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK),
+				   avail_coremask);
+	}
+
+	pr_info("Reset core %d. Available Coremask = %x\n", coreid,
+		avail_coremask);
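+	/* Pulse the reset line of the core that just went offline. */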
+	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
+	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
+}
+
+void play_dead(void)
+{
+	int cpu = smp_processor_id();
+
+	idle_task_exit();
+	octeon_processor_boot = 0xff;
+	/* Index by logical CPU id so octeon_cpu_die() sees this update. */
+	per_cpu(cpu_state, cpu) = CPU_DEAD;
+
+	while (1)	/* core will be reset here */
+		;
+}
+
+extern void kernel_entry(unsigned long arg1, ...);
+
+static void start_after_reset(void)
+{
+	kernel_entry(0, 0, 0);  /* set a2 = 0 for secondary core */
+}
+
+int octeon_update_boot_vector(unsigned int cpu)
+{
+	int coreid = cpu_logical_map(cpu);
+	unsigned int avail_coremask;
+	struct cvmx_bootmem_named_block_desc *block_desc;
+	struct boot_init_vector *boot_vect =
+		(struct boot_init_vector *) cvmx_phys_to_ptr(0x0 +
+						  BOOTLOADER_BOOT_VECTOR);
+
+	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
+
+	if (!block_desc) {
+		avail_coremask =
+			cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+				LABI_ADDR_IN_BOOTLOADER +
+				offsetof(struct linux_app_boot_info,
+					 avail_coremask)));
+	} else {	/* alternative, already initialized */
+		avail_coremask =
+			cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+				block_desc->base_addr +
+				AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
+	}
+
+	if (!(avail_coremask & (1 << coreid))) {
+		/* core not available; assume it was caught by the simple executive */
+		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
+		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
+	}
+
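+	/*
+	 * Point the bootloader's boot vector at the kernel re-entry stub;
+	 * code_addr is the bootloader start address saved at SMP setup time.
+	 */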
+	boot_vect[coreid].app_start_func_addr =
+		(uint32_t) (unsigned long) start_after_reset;
+	boot_vect[coreid].code_addr = InitTLBStart_addr;
+
+	CVMX_SYNC;
+
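+	/*
+	 * Send an NMI to the target core so the bootloader releases it and
+	 * it re-enters the kernel through the boot vector set up above.
+	 */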
+	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);
+
+	return 0;
+}
+
+static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
+	unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+		octeon_update_boot_vector(cpu);
+		break;
+	case CPU_ONLINE:
+		pr_info("CPU %d online\n", cpu);
+		break;
+	case CPU_DEAD:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata octeon_cpu_notifier = {
+	.notifier_call = octeon_cpu_callback,
+};
+
+static int __cpuinit register_cavium_notifier(void)
+{
+	register_hotcpu_notifier(&octeon_cpu_notifier);
+
+	return 0;
+}
+
+late_initcall(register_cavium_notifier);
+
+#endif  /* CONFIG_HOTPLUG_CPU */
+
 struct plat_smp_ops octeon_smp_ops = {
 	.send_ipi_single	= octeon_send_ipi_single,
 	.send_ipi_mask		= octeon_send_ipi_mask,
@@ -208,4 +436,8 @@
 	.boot_secondary		= octeon_boot_secondary,
 	.smp_setup		= octeon_smp_setup,
 	.prepare_cpus		= octeon_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_disable		= octeon_cpu_disable,
+	.cpu_die		= octeon_cpu_die,
+#endif
 };