/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/xcr.h>
#include <asm/suspend.h>
#include <asm/debugreg.h>
#include <asm/fpu-internal.h>	/* pcntxt_mask */
#include <asm/cpu.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
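/*
 * Boot CPU register state, filled in by __save_processor_state() and
 * consumed by __restore_processor_state() on resume.
 */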
struct saved_context saved_context;

/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt: structure to store the registers' contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (i.e. the kernel used for loading the hibernation image)
 *	might affect the operations of the restored target kernel (i.e. the one
 *	saved in the hibernation image), then its contents must be saved by this
 *	function.  In other words, if kernel A is hibernated and a different
 *	kernel B is used for loading the hibernation image into memory, then
 *	kernel A's __save_processor_state() function must save all registers
 *	needed by kernel A, so that it can operate correctly after the resume
 *	regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_table(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = __read_cr4_safe();
#ifdef CONFIG_X86_64
	ctxt->cr8 = read_cr8();
#endif
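	/*
	 * rdmsrl_safe() returns 0 on success, so misc_enable_saved records
	 * whether the MSR was actually read; the restore path only writes
	 * MSR_IA32_MISC_ENABLE back if that read succeeded.
	 */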
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

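/*
 * Reload the descriptor-table state that cannot simply be copied back from
 * memory: mark the TSS descriptor in the GDT as available again and reload
 * TR, reload the current mm's LDT, and (on 64-bit) reinitialize the syscall
 * MSRs via syscall_init().
 */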
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_table(cpu);
	tss_desc tss;
#endif
	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; should not be
				 * necessary. But... This is necessary, because
				 * 386 hardware has the concept of a busy TSS or
				 * some similar stupidity.
				 */

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */
}

/**
 *	__restore_processor_state - restore the contents of CPU registers saved
 *		by __save_processor_state()
 *	@ctxt: structure to load the registers' contents from
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

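	/*
	 * Reloading the %fs/%gs selectors clobbers the hidden segment base
	 * registers, so the saved FS/GS/KERNEL_GS base MSRs are written back
	 * only after the selector loads above.
	 */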
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	/*
	 * Restore XCR0 for xsave-capable CPUs.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

	fix_processor_context();

	do_fpu_end();
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already, so it's unnecessary to handle the race condition
 * between the cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0 because
		 * 1. it's required for resume and
		 * 2. the CPU was online before hibernation
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation software
		 * prepares the snapshot device during boot time. So we just
		 * call _debug_hotplug_cpu() to restore CPU0 to its state prior
		 * to preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug debug
		 * mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the snapshot
		 * device after boot time, this will offline CPU0 and the user
		 * may see a different CPU0 state before and after accessing
		 * the snapshot device. But hopefully that is not the case when
		 * a user is debugging CPU0 hotplug. Even if users hit this case,
		 * they can easily online CPU0 again.
		 *
		 * To simplify this debug code, we only consider the normal boot
		 * case. Otherwise we would need to remember CPU0's state, restore
		 * to that state, resolve race conditions, etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Set this bsp_pm_callback to a lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback is called
	 * earlier and disables cpu hotplug before the BSP online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);