/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
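
/*
 * Illustrative sketch (not part of the original file): the register-and-init
 * pattern the begin/done helpers above are meant for, so that no CPU can come
 * or go between initializing per-CPU state and registering the callback.
 * example_init_cpu() and example_cpu_nb are hypothetical names.
 *
 *	cpu_notifier_register_begin();
 *
 *	for_each_online_cpu(cpu)
 *		example_init_cpu(cpu);		// hypothetical per-CPU setup
 *
 *	// note the double-underscored (lockless) registration variant
 *	__register_cpu_notifier(&example_cpu_nb);
 *
 *	cpu_notifier_register_done();
 */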

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);
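
/*
 * Illustrative sketch (not part of the original file): the intended read-side
 * pattern.  get_online_cpus()/put_online_cpus() bracket code that needs a
 * stable cpu_online_mask; example_do_work_on() is a hypothetical per-CPU
 * operation.
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		example_do_work_on(cpu);	// no CPU can go away here
 *	put_online_cpus();
 */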

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
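
/*
 * Illustrative sketch (not part of the original file): how the write side is
 * used by the hotplug core itself (see _cpu_down()/_cpu_up() below).  The
 * ordering is what the comment above relies on: cpu_maps_update_begin()
 * guarantees a single writer before cpu_hotplug_begin() waits out the readers.
 *
 *	cpu_maps_update_begin();	// take cpu_add_remove_lock
 *	cpu_hotplug_begin();		// wait for the refcount to reach zero
 *	...				// modify cpu_online_mask etc.
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */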

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
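
/*
 * Illustrative sketch (not part of the original file): a minimal notifier a
 * subsystem might hang on cpu_chain via register_cpu_notifier().  The
 * callback name and the per-CPU work are hypothetical; masking out
 * CPU_TASKS_FROZEN lets one switch cover both the normal and the
 * suspend/resume variant of each event.
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			// allocate per-CPU resources for @cpu
 *			break;
 *		case CPU_ONLINE:
 *			// start using @cpu
 *			break;
 *		case CPU_DOWN_PREPARE:
 *			// returning notifier_from_errno(-EAGAIN) here
 *			// would veto the offline attempt
 *			break;
 *		case CPU_DEAD:
 *			// free per-CPU resources of @cpu
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_cpu_nb = {
 *		.notifier_call = example_cpu_callback,
 *	};
 */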

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
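
/*
 * Illustrative sketch (not part of the original file): roughly where an
 * architecture's CPU-offline path calls the helper above, modelled on an
 * ARM-style __cpu_disable(); the surrounding steps are arch specific and
 * simplified here.
 *
 *	int __cpu_disable(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		set_cpu_online(cpu, false);	// stop being a target for work
 *		...				// migrate IRQs, flush caches
 *		clear_tasks_mm_cpumask(cpu);	// CPU is already offline here
 *		return 0;
 *	}
 */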

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a
		 * task which was running on this cpu in the past and
		 * has just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}
324
Avi Kivitydb912f92007-05-24 12:23:10 +0300325struct take_cpu_down_param {
326 unsigned long mod;
327 void *hcpu;
328};
329
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330/* Take this CPU down. */
Sam Ravnborg514a20a2008-04-29 00:58:50 -0700331static int __ref take_cpu_down(void *_param)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700332{
Avi Kivitydb912f92007-05-24 12:23:10 +0300333 struct take_cpu_down_param *param = _param;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334 int err;
335
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 /* Ensure this CPU doesn't handle any more interrupts. */
337 err = __cpu_disable();
338 if (err < 0)
Zwane Mwaikambof3705132005-06-25 14:54:50 -0700339 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700341 cpu_notify(CPU_DYING | param->mod, param->hcpu);
Thomas Gleixner52c063d2015-04-03 02:37:24 +0200342 /* Give up timekeeping duties */
343 tick_handover_do_timer();
Thomas Gleixner14e568e2013-01-31 12:11:14 +0000344 /* Park the stopper thread */
345 kthread_park(current);
Zwane Mwaikambof3705132005-06-25 14:54:50 -0700346 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347}
348
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700349/* Requires cpu_add_remove_lock to be held */
Sam Ravnborg514a20a2008-04-29 00:58:50 -0700350static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351{
Heiko Carstense7407dc2007-05-09 02:34:04 -0700352 int err, nr_calls = 0;
Heiko Carstense7407dc2007-05-09 02:34:04 -0700353 void *hcpu = (void *)(long)cpu;
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -0700354 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
Avi Kivitydb912f92007-05-24 12:23:10 +0300355 struct take_cpu_down_param tcd_param = {
356 .mod = mod,
357 .hcpu = hcpu,
358 };
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700360 if (num_online_cpus() == 1)
361 return -EBUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700363 if (!cpu_online(cpu))
364 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100366 cpu_hotplug_begin();
Michael Rodriguez4d519852011-03-22 16:34:07 -0700367
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700368 err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
Akinobu Mitae6bde732010-05-26 14:43:29 -0700369 if (err) {
Akinobu Mitaa0d8cdb2007-10-18 03:05:12 -0700370 nr_calls--;
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700371 __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
Fabian Frederick84117da2014-06-04 16:11:17 -0700372 pr_warn("%s: attempt to take down CPU %u failed\n",
373 __func__, cpu);
Gautham R Shenoybaaca492007-05-09 02:34:03 -0700374 goto out_release;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375 }
376
Peter Zijlstra6acce3e2013-10-11 14:38:20 +0200377 /*
378 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
379 * and RCU users of this state to go away such that all new such users
380 * will observe it.
381 *
382 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
383 * not imply sync_sched(), so explicitly call both.
Michael wang106dd5a2013-11-13 11:10:56 +0800384 *
385 * Do sync before park smpboot threads to take care the rcu boost case.
Peter Zijlstra6acce3e2013-10-11 14:38:20 +0200386 */
387#ifdef CONFIG_PREEMPT
388 synchronize_sched();
389#endif
390 synchronize_rcu();
391
Michael wang106dd5a2013-11-13 11:10:56 +0800392 smpboot_park_threads(cpu);
393
Peter Zijlstra6acce3e2013-10-11 14:38:20 +0200394 /*
395 * So now all preempt/rcu users must observe !cpu_active().
396 */
397
Rusty Russelle0b582e2009-01-01 10:12:28 +1030398 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
Rusty Russell04321582008-07-28 12:16:29 -0500399 if (err) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400 /* CPU didn't die: tell everyone. Can't complain. */
Thomas Gleixnerf97f8f02012-07-16 10:42:36 +0000401 smpboot_unpark_threads(cpu);
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700402 cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
Oleg Nesterov6a1bdc12010-03-15 10:10:23 +0100403 goto out_release;
Satoru Takeuchi8fa1d7d2006-10-28 10:38:57 -0700404 }
Rusty Russell04321582008-07-28 12:16:29 -0500405 BUG_ON(cpu_online(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406
Peter Zijlstra48c5cca2010-11-13 19:32:29 +0100407 /*
408 * The migration_call() CPU_DYING callback will have removed all
409 * runnable tasks from the cpu, there's only the idle task left now
410 * that the migration thread is done doing the stop_machine thing.
Peter Zijlstra51a96c72010-11-19 20:37:53 +0100411 *
412 * Wait for the stop thread to go away.
Peter Zijlstra48c5cca2010-11-13 19:32:29 +0100413 */
Peter Zijlstra51a96c72010-11-19 20:37:53 +0100414 while (!idle_cpu(cpu))
415 cpu_relax();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416
Preeti U Murthy345527b2015-03-30 14:59:19 +0530417 hotplug_cpu__broadcast_tick_pull(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 /* This actually kills the CPU. */
419 __cpu_die(cpu);
420
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421 /* CPU is completely dead: tell everyone. Too late to complain. */
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700422 cpu_notify_nofail(CPU_DEAD | mod, hcpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423
424 check_for_tasks(cpu);
425
Gautham R Shenoybaaca492007-05-09 02:34:03 -0700426out_release:
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100427 cpu_hotplug_done();
Akinobu Mitae9fb7632010-05-26 14:43:28 -0700428 if (!err)
429 cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700430 return err;
431}
432
Sam Ravnborg514a20a2008-04-29 00:58:50 -0700433int __ref cpu_down(unsigned int cpu)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700434{
Heiko Carstens9ea09af2008-12-22 12:36:30 +0100435 int err;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700436
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100437 cpu_maps_update_begin();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700438
Max Krasnyanskye761b772008-07-15 04:43:49 -0700439 if (cpu_hotplug_disabled) {
440 err = -EBUSY;
441 goto out;
442 }
443
Max Krasnyanskye761b772008-07-15 04:43:49 -0700444 err = _cpu_down(cpu, 0);
445
Max Krasnyanskye761b772008-07-15 04:43:49 -0700446out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100447 cpu_maps_update_done();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700448 return err;
449}
Zhang Ruib62b8ef2008-04-29 02:35:56 -0400450EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
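
/*
 * Illustrative sketch (not part of the original file): how an in-kernel user
 * (a driver or a test module, say) would take a CPU through an offline/online
 * cycle with the exported helpers above.  The choice of CPU and the error
 * handling are hypothetical.
 *
 *	int err;
 *
 *	err = cpu_down(1);		// offline CPU 1, -EBUSY if disabled
 *	if (err)
 *		return err;
 *	...
 *	err = cpu_up(1);		// bring it back online
 */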

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
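
/*
 * Illustrative sketch (not part of the original file): where the call above
 * fits in a typical arch secondary-CPU bringup path.  The function name and
 * the surrounding steps are hypothetical and arch specific; the point is that
 * notify_cpu_starting() runs on the new CPU with interrupts still disabled,
 * before the boot CPU's __cpu_up() returns.
 *
 *	void secondary_start_kernel(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		...					// MMU, per-CPU setup
 *		notify_cpu_starting(cpu);		// CPU_STARTING notifiers
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_ONLINE);	// enter the idle loop
 *	}
 */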

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
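
/*
 * Illustrative sketch (not part of the original file): roughly how
 * get_cpu_mask() (behind cpumask_of()) turns the table above into a pointer
 * to a single-bit mask; see include/linux/cpumask.h for the real thing.
 * Row 1 + (cpu % BITS_PER_LONG) holds a word with just that bit set, and
 * stepping the pointer back by cpu / BITS_PER_LONG words chooses which word
 * of the resulting mask that bit lands in.
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 */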

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}