/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

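/*
 * A typical registration sequence with these APIs looks roughly like the
 * sketch below ("foo_cpu_notifier" and "foo_prepare_cpu" are hypothetical,
 * caller-defined names):
 *
 *	cpu_notifier_register_begin();
 *
 *	for_each_online_cpu(cpu)
 *		foo_prepare_cpu(cpu);
 *	__register_cpu_notifier(&foo_cpu_notifier);
 *
 *	cpu_notifier_register_done();
 *
 * This keeps the per-CPU setup and the callback registration atomic with
 * respect to concurrent hotplug operations.
 */
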
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

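/*
 * get_online_cpus()/put_online_cpus() form the reader side of the hotplug
 * lock: between the two calls no CPU can be onlined or offlined.  A common
 * pattern is the sketch below (do_something() is a hypothetical per-CPU
 * operation):
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	put_online_cpus();
 */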
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

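/*
 * Writer side, as used by _cpu_up()/_cpu_down() below (a sketch):
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	... perform the actual hotplug operation ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */
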
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

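/*
 * Invoke the notifier chain for @val.  nr_to_call limits how many callbacks
 * run (-1 means all), and *nr_calls, if non-NULL, returns how many were
 * actually invoked -- which lets a failed CPU_*_PREPARE phase be unwound by
 * replaying only the callbacks that already ran.  The NOTIFY_* return value
 * is converted into a conventional -errno.
 */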
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past but has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

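/*
 * take_cpu_down() is handed to __stop_machine() below, so it runs on the
 * CPU being removed, with interrupts disabled and all other CPUs held in
 * their stopper threads -- nothing can race with the teardown.
 */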
/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

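/*
 * Offline sequence, matching the notifier events visible below:
 * CPU_DOWN_PREPARE -> RCU/sched synchronization -> park per-cpu kthreads ->
 * stop_machine(take_cpu_down), which fires CPU_DYING -> wait for the idle
 * task to reach cpu_dead_idle -> __cpu_die() -> CPU_DEAD -> CPU_POST_DEAD.
 */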
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

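/*
 * Example (a sketch): offlining CPU 2 from kernel code is simply
 *
 *	int err = cpu_down(2);
 *
 * which is also the path taken when 0 is written to
 * /sys/devices/system/cpu/cpu2/online.
 */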
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

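/*
 * Example (a sketch): bringing CPU 2 online from kernel code is
 *
 *	int err = cpu_up(2);
 *	if (err)
 *		pr_err("CPU2 failed to come up: %d\n", err);
 *
 * the same path taken when 1 is written to
 * /sys/devices/system/cpu/cpu2/online.
 */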
int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * disabling cpu hotplug to avoid a cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

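/*
 * For example, get_cpu_mask() in <linux/cpumask.h> resolves a mask as in
 * this sketch:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * The row index picks which bit within a word is set; backing the pointer
 * up by whole words puts that word at the right offset, which is why row 0
 * must stay empty so it can be "backed into".
 */
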
/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

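/*
 * Note the asymmetry: onlining a CPU also marks it active, but offlining
 * clears only the online bit.  The active bit is dropped separately,
 * earlier in the teardown (see the "cleared cpu_active_mask" comment in
 * _cpu_down()), so the scheduler stops placing work on the CPU before it
 * actually disappears.
 */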
void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}