MIPS: kdump: Add support

Add the MIPS plumbing needed for kdump: overridable platform hooks
(_machine_kexec_prepare, _machine_kexec_shutdown and
_machine_crash_shutdown, the latter falling back to
default_machine_crash_shutdown()), an SMP handshake
(kexec_ready_to_reboot / relocated_kexec_smp_wait) so that secondary
CPUs can park in the relocated kexec_smp_wait code, and conversion of
kexec_start_address with phys_to_virt(), matching the indirection page.

[ralf@linux-mips.org: Original patch by Maxim Uvarov <muvarov@gmail.com>,
with plenty of further polishing, debugging and testing by me.]

Signed-off-by: Maxim Uvarov <muvarov@gmail.com>
Cc: linux-mips@linux-mips.org
Cc: kexec@lists.infradead.org
Cc: horms@verge.net.au
Patchwork: https://patchwork.linux-mips.org/patch/1025/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 85beb9b..992e184 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -5,7 +5,7 @@
  * This source code is licensed under the GNU General Public License,
  * Version 2.  See the file COPYING for more details.
  */
-
+#include <linux/compiler.h>
 #include <linux/kexec.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
@@ -19,9 +19,19 @@
 extern unsigned long kexec_start_address;
 extern unsigned long kexec_indirection_page;
 
+int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+void (*_machine_kexec_shutdown)(void) = NULL;
+void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+#ifdef CONFIG_SMP
+void (*relocated_kexec_smp_wait) (void *);
+atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+#endif
+
 int
 machine_kexec_prepare(struct kimage *kimage)
 {
+	if (_machine_kexec_prepare)
+		return _machine_kexec_prepare(kimage);
 	return 0;
 }
 
@@ -33,14 +43,20 @@
 void
 machine_shutdown(void)
 {
+	if (_machine_kexec_shutdown)
+		_machine_kexec_shutdown();
 }
 
 void
 machine_crash_shutdown(struct pt_regs *regs)
 {
+	if (_machine_crash_shutdown)
+		_machine_crash_shutdown(regs);
+	else
+		default_machine_crash_shutdown(regs);
 }
 
-typedef void (*noretfun_t)(void) __attribute__((noreturn));
+typedef void (*noretfun_t)(void) __noreturn;
 
 void
 machine_kexec(struct kimage *image)
@@ -52,7 +68,9 @@
 	reboot_code_buffer =
 	  (unsigned long)page_address(image->control_code_page);
 
-	kexec_start_address = image->start;
+	kexec_start_address =
+		(unsigned long) phys_to_virt(image->start);
+
 	kexec_indirection_page =
 		(unsigned long) phys_to_virt(image->head & PAGE_MASK);
 
@@ -63,7 +81,7 @@
 	 * The generic kexec code builds a page list with physical
 	 * addresses. they are directly accessible through KSEG0 (or
 	 * CKSEG0 or XPHYS if on 64bit system), hence the
-	 * pys_to_virt() call.
+	 * phys_to_virt() call.
 	 */
 	for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE);
 	     ptr = (entry & IND_INDIRECTION) ?
@@ -81,5 +99,12 @@
 	printk("Will call new kernel at %08lx\n", image->start);
 	printk("Bye ...\n");
 	__flush_cache_all();
+#ifdef CONFIG_SMP
+	/* All secondary CPUs may now jump to the kexec_wait loop */
+	relocated_kexec_smp_wait = reboot_code_buffer +
+		(void *)(kexec_smp_wait - relocate_new_kernel);
+	smp_wmb();
+	atomic_set(&kexec_ready_to_reboot, 1);
+#endif
 	((noretfun_t) reboot_code_buffer)();
 }
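
Illustrative usage notes (not part of the patch itself):

A board would typically install the new hooks from its platform setup
code.  The sketch below is a minimal example only; the "myboard" names
and the initcall are hypothetical, and it assumes the extern
declarations for _machine_kexec_prepare and friends are provided by
<asm/kexec.h>:

#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/ptrace.h>
#include <asm/kexec.h>

/* Hypothetical prepare hook: quiesce board DMA, save state, etc. */
static int myboard_kexec_prepare(struct kimage *kimage)
{
	return 0;
}

/* Hypothetical crash hook that just falls back to the generic path. */
static void myboard_crash_shutdown(struct pt_regs *regs)
{
	default_machine_crash_shutdown(regs);
}

static int __init myboard_kexec_init(void)
{
	_machine_kexec_prepare = myboard_kexec_prepare;
	_machine_crash_shutdown = myboard_crash_shutdown;
	return 0;
}
arch_initcall(myboard_kexec_init);

On SMP the crashing CPU publishes relocated_kexec_smp_wait, issues the
smp_wmb() and only then sets kexec_ready_to_reboot.  A secondary CPU is
expected to spin on that flag and jump into the relocated wait code,
roughly as in the sketch below (the function name is illustrative; the
read/write barrier pairing is the assumption this patch relies on):

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/kexec.h>

static void secondary_kexec_wait(void)
{
	/* Wait until the crashing CPU has published the relocated code. */
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();
	smp_rmb();	/* pairs with the smp_wmb() before atomic_set() */

	relocated_kexec_smp_wait(NULL);	/* does not return */
}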