/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	init_idle(tsk, cpu);
	return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu;

	boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)
			idle_init(cpu);
	}
}
#endif

#endif /* #ifdef CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
	unsigned int			cpu;
	unsigned int			status;
	struct smp_hotplug_thread	*ht;
};

enum {
	HP_THREAD_NONE = 0,
	HP_THREAD_ACTIVE,
	HP_THREAD_PARKED,
};
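
/*
 * Life cycle of a hotplug thread, as driven by smpboot_thread_fn() below:
 * NONE -> setup() -> ACTIVE, ACTIVE -> park() -> PARKED and back to ACTIVE
 * via unpark(). cleanup() is called on stop from either ACTIVE or PARKED,
 * mirroring setup().
 */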

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is told to stop; it does not return otherwise.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* cleanup must mirror setup */
			if (ht->cleanup && td->status != HP_THREAD_NONE)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;

		case HP_THREAD_PARKED:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;
		}

		if (!ht->thread_should_run(td->cpu)) {
			preempt_enable_no_resched();
			schedule();
		} else {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}

static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	if (tsk)
		return 0;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;
	int ret = 0;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list) {
		ret = __smpboot_create_thread(cur, cpu);
		if (ret)
			break;
	}
	mutex_unlock(&smpboot_threads_lock);
	return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (!ht->selfparking)
		kthread_unpark(tsk);
}

int smpboot_unpark_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list)
		if (cpumask_test_cpu(cpu, cur->cpumask))
			smpboot_unpark_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (tsk && !ht->selfparking)
		kthread_park(tsk);
}

int smpboot_park_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry_reverse(cur, &hotplug_threads, list)
		smpboot_park_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
	unsigned int cpu;

	/* We need to destroy also the parked threads of offline cpus */
	for_each_possible_cpu(cpu) {
		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

		if (tsk) {
			kthread_stop(tsk);
			put_task_struct(tsk);
			*per_cpu_ptr(ht->store, cpu) = NULL;
		}
	}
}

/**
 * smpboot_register_percpu_thread_cpumask - Register a per_cpu thread related
 *					    to hotplug
 * @plug_thread:	Hotplug thread descriptor
 * @cpumask:		The cpumask where threads run
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
					   const struct cpumask *cpumask)
{
	unsigned int cpu;
	int ret = 0;

	if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(plug_thread->cpumask, cpumask);

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		if (ret) {
			smpboot_destroy_threads(plug_thread);
			free_cpumask_var(plug_thread->cpumask);
			goto out;
		}
		if (cpumask_test_cpu(cpu, cpumask))
			smpboot_unpark_thread(plug_thread, cpu);
	}
	list_add(&plug_thread->list, &hotplug_threads);
out:
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread_cpumask);
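
/*
 * Minimal usage sketch (illustrative only; the "example_*" names and the
 * pr_info() payload are made up and not part of this file or any caller):
 *
 *	static DEFINE_PER_CPU(struct task_struct *, example_task);
 *	static DEFINE_PER_CPU(bool, example_work_pending);
 *
 *	static int example_should_run(unsigned int cpu)
 *	{
 *		return per_cpu(example_work_pending, cpu);
 *	}
 *
 *	static void example_fn(unsigned int cpu)
 *	{
 *		per_cpu(example_work_pending, cpu) = false;
 *		pr_info("example work ran on CPU %u\n", cpu);
 *	}
 *
 *	static struct smp_hotplug_thread example_threads = {
 *		.store			= &example_task,
 *		.thread_should_run	= example_should_run,
 *		.thread_fn		= example_fn,
 *		.thread_comm		= "example/%u",
 *	};
 *
 *	int ret = smpboot_register_percpu_thread_cpumask(&example_threads,
 *							  cpu_possible_mask);
 *
 * The threads are created for every online cpu and unparked only on the
 * cpus set in the supplied mask.
 */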

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	free_cpumask_var(plug_thread->cpumask);
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);

/**
 * smpboot_update_cpumask_percpu_thread - Adjust which per_cpu hotplug threads stay parked
 * @plug_thread:	Hotplug thread descriptor
 * @new:		Revised mask to use
 *
 * The cpumask field in the smp_hotplug_thread must not be updated directly
 * by the client, but only by calling this function.
 * This function can only be called on a registered smp_hotplug_thread.
 */
int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
					 const struct cpumask *new)
{
	struct cpumask *old = plug_thread->cpumask;
	cpumask_var_t tmp;
	unsigned int cpu;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);

	/* Park threads that were exclusively enabled on the old mask. */
	cpumask_andnot(tmp, old, new);
	for_each_cpu_and(cpu, tmp, cpu_online_mask)
		smpboot_park_thread(plug_thread, cpu);

	/* Unpark threads that are exclusively enabled on the new mask. */
	cpumask_andnot(tmp, new, old);
	for_each_cpu_and(cpu, tmp, cpu_online_mask)
		smpboot_unpark_thread(plug_thread, cpu);

	cpumask_copy(old, new);

	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();

	free_cpumask_var(tmp);

	return 0;
}
EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread);
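
/*
 * Illustrative follow-up to the sketch above (hypothetical caller code):
 * shrinking the mask parks the threads on the online cpus that dropped out
 * of it, growing it unparks the threads on the online cpus that were added.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(mask, cpu_possible_mask);
 *	cpumask_clear_cpu(1, mask);
 *	smpboot_update_cpumask_percpu_thread(&example_threads, mask);
 *	free_cpumask_var(mask);
 */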

static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
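
/*
 * State transitions, as implemented below: cpu_check_up_prepare() moves
 * CPU_POST_DEAD to CPU_UP_PREPARE, cpu_set_state_online() sets CPU_ONLINE,
 * cpu_report_death() sets CPU_DEAD (or CPU_DEAD_FROZEN if the waiter has
 * already timed out and marked the CPU CPU_BROKEN), and cpu_wait_death()
 * finishes with CPU_POST_DEAD on success or CPU_BROKEN on timeout.
 */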

/*
 * Called to poll specified CPU's state, for example, when waiting for
 * a CPU to come online.
 */
int cpu_report_state(int cpu)
{
	return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}

/*
 * If CPU has died properly, set its state to CPU_UP_PREPARE and
 * return success. Otherwise, return -EBUSY if the CPU died after
 * cpu_wait_death() timed out. And yet otherwise again, return -EAGAIN
 * if cpu_wait_death() timed out and the CPU still hasn't gotten around
 * to dying. In the latter two cases, the CPU might not be set up
 * properly, but it is up to the arch-specific code to decide.
 * Finally, -EIO indicates an unanticipated problem.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
int cpu_check_up_prepare(int cpu)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;
	}

	switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {

	case CPU_POST_DEAD:

		/* The CPU died properly, so just start it up again. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;

	case CPU_DEAD_FROZEN:

		/*
		 * Timeout during CPU death, so let caller know.
		 * The outgoing CPU completed its processing, but after
		 * cpu_wait_death() timed out and reported the error. The
		 * caller is free to proceed, in which case the state
		 * will be reset properly by cpu_set_state_online().
		 * Proceeding despite this -EBUSY return makes sense
		 * for systems where the outgoing CPUs take themselves
		 * offline, with no post-death manipulation required from
		 * a surviving CPU.
		 */
		return -EBUSY;

	case CPU_BROKEN:

		/*
		 * The most likely reason we got here is that there was
		 * a timeout during CPU death, and the outgoing CPU never
		 * did complete its processing. This could happen on
		 * a virtualized system if the outgoing VCPU gets preempted
		 * for more than five seconds, and the user attempts to
		 * immediately online that same CPU. Trying again later
		 * might return -EBUSY above, hence -EAGAIN.
		 */
		return -EAGAIN;

	default:

		/* Should not happen.  Famous last words. */
		return -EIO;
	}
}

/*
 * Mark the specified CPU online.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
void cpu_set_state_online(int cpu)
{
	(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Wait for the specified CPU to exit the idle loop and die.
 */
bool cpu_wait_death(unsigned int cpu, int seconds)
{
	int jf_left = seconds * HZ;
	int oldstate;
	bool ret = true;
	int sleep_jf = 1;

	might_sleep();

	/* The outgoing CPU will normally get done quite quickly. */
	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
		goto update_state;
	udelay(5);

	/* But if the outgoing CPU dawdles, wait increasingly long times. */
	while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
		schedule_timeout_uninterruptible(sleep_jf);
		jf_left -= sleep_jf;
		if (jf_left <= 0)
			break;
		sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
	}
update_state:
	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
	if (oldstate == CPU_DEAD) {
		/* Outgoing CPU died normally, update state. */
		smp_mb(); /* atomic_read() before update. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
	} else {
		/* Outgoing CPU still hasn't died, set state accordingly. */
		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				   oldstate, CPU_BROKEN) != oldstate)
			goto update_state;
		ret = false;
	}
	return ret;
}

/*
 * Called by the outgoing CPU to report its successful death.  Return
 * false if this report follows the surviving CPU's timing out.
 *
 * A separate "CPU_DEAD_FROZEN" is used when the surviving CPU
 * timed out.  This approach allows architectures to omit calls to
 * cpu_check_up_prepare() and cpu_set_state_online() without defeating
 * the next cpu_wait_death()'s polling loop.
 */
bool cpu_report_death(void)
{
	int oldstate;
	int newstate;
	int cpu = smp_processor_id();

	do {
		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
		if (oldstate != CPU_BROKEN)
			newstate = CPU_DEAD;
		else
			newstate = CPU_DEAD_FROZEN;
	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				oldstate, newstate) != oldstate);
	return newstate == CPU_DEAD;
}
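
/*
 * Sketch of the expected handshake, as seen from hypothetical arch code
 * (none of this lives in this file; error handling is illustrative only):
 *
 *	// Surviving CPU, e.g. in its __cpu_die() implementation:
 *	if (!cpu_wait_death(cpu, 5))
 *		pr_err("CPU %u did not report its death\n", cpu);
 *
 *	// Outgoing CPU, as the last step before it powers itself off:
 *	(void)cpu_report_death();
 *
 *	// Bringing the CPU back up later:
 *	ret = cpu_check_up_prepare(cpu);	// surviving CPU
 *	...
 *	cpu_set_state_online(cpu);		// incoming CPU, once it is up
 */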

#endif /* #ifdef CONFIG_HOTPLUG_CPU */