#ifndef _ASM_POWERPC_KEXEC_H
#define _ASM_POWERPC_KEXEC_H
#ifdef __KERNEL__

/*
 * Maximum page that is mapped directly into kernel memory.
 * XXX: Since we copy virt we can use any page we allocate
 */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)

/*
 * Maximum address we can reach in physical address mode.
 * XXX: I want to allow initrd in highmem. Otherwise set to rmo on LPAR.
 */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)

/* Maximum address we can use for the control code buffer */
#ifdef __powerpc64__
#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
#else
/* TASK_SIZE, probably left over from use_mm ?? */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
#endif

#define KEXEC_CONTROL_CODE_SIZE 4096

/* The native architecture */
#ifdef __powerpc64__
#define KEXEC_ARCH KEXEC_ARCH_PPC64
#else
#define KEXEC_ARCH KEXEC_ARCH_PPC
#endif

#ifdef CONFIG_KEXEC

#ifndef __ASSEMBLY__
#ifdef __powerpc64__
/*
 * This function is responsible for capturing register states if coming
 * via panic or invoking dump using sysrq-trigger.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs)
		memcpy(newregs, oldregs, sizeof(*newregs));
	else {
		/* FIXME Merge this with xmon_save_regs ?? */
		unsigned long tmp1, tmp2;
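		/*
		 * The store offsets below follow the 64-bit struct pt_regs
		 * layout: gpr[0..31] at bytes 0..248, nip at 256, msr at 264,
		 * ctr at 280, link at 288 and xer at 296.  The "bl 1f" /
		 * "1: mflr" pair records the current point of execution,
		 * which is stored as the saved nip; the original link
		 * register is restored afterwards with mtlr.
		 */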
		__asm__ __volatile__ (
			"std 0,0(%2)\n"
			"std 1,8(%2)\n"
			"std 2,16(%2)\n"
			"std 3,24(%2)\n"
			"std 4,32(%2)\n"
			"std 5,40(%2)\n"
			"std 6,48(%2)\n"
			"std 7,56(%2)\n"
			"std 8,64(%2)\n"
			"std 9,72(%2)\n"
			"std 10,80(%2)\n"
			"std 11,88(%2)\n"
			"std 12,96(%2)\n"
			"std 13,104(%2)\n"
			"std 14,112(%2)\n"
			"std 15,120(%2)\n"
			"std 16,128(%2)\n"
			"std 17,136(%2)\n"
			"std 18,144(%2)\n"
			"std 19,152(%2)\n"
			"std 20,160(%2)\n"
			"std 21,168(%2)\n"
			"std 22,176(%2)\n"
			"std 23,184(%2)\n"
			"std 24,192(%2)\n"
			"std 25,200(%2)\n"
			"std 26,208(%2)\n"
			"std 27,216(%2)\n"
			"std 28,224(%2)\n"
			"std 29,232(%2)\n"
			"std 30,240(%2)\n"
			"std 31,248(%2)\n"
			"mfmsr %0\n"
			"std %0, 264(%2)\n"
			"mfctr %0\n"
			"std %0, 280(%2)\n"
			"mflr %0\n"
			"std %0, 288(%2)\n"
			"bl 1f\n"
			"1: mflr %1\n"
			"std %1, 256(%2)\n"
			"mtlr %0\n"
			"mfxer %0\n"
			"std %0, 296(%2)\n"
			: "=&r" (tmp1), "=&r" (tmp2)
			: "b" (newregs)
			: "memory");
	}
}
#else
/*
 * Provide a dummy definition to avoid build failures. Will remain
 * empty until crash dump support is enabled.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs) { }
#endif /* !__powerpc64__ */
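
/*
 * Illustrative usage only, not part of this header: the expected caller is
 * the generic crash path in kernel/kexec.c.  A simplified sketch (locking
 * and image checks omitted) of how that path would use crash_setup_regs()
 * looks roughly like:
 *
 *	void crash_kexec(struct pt_regs *regs)
 *	{
 *		struct pt_regs fixed_regs;
 *
 *		// regs is the exception frame when dying via panic/oops;
 *		// it is NULL when the dump comes from sysrq-trigger, in
 *		// which case crash_setup_regs() snapshots the live
 *		// registers itself.
 *		crash_setup_regs(&fixed_regs, regs);
 *		machine_crash_shutdown(&fixed_regs);
 *		machine_kexec(kexec_crash_image);
 *	}
 */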

#define MAX_NOTE_BYTES 1024

#ifdef __powerpc64__
extern void kexec_smp_wait(void);	/* get and clear naca physid, wait for
					   master to copy new code to 0 */
extern void __init kexec_setup(void);
extern int crashing_cpu;
extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *));
#endif /* __powerpc64__ */

struct kimage;
struct pt_regs;
extern void default_machine_kexec(struct kimage *image);
extern int default_machine_kexec_prepare(struct kimage *image);
extern void default_machine_crash_shutdown(struct pt_regs *regs);

extern void machine_kexec_simple(struct kimage *image);

#endif /* ! __ASSEMBLY__ */
#endif /* CONFIG_KEXEC */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KEXEC_H */