/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)


void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);

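/*
 * Illustrative reader-side sketch (do_per_cpu_work() is a hypothetical
 * helper, not something defined in this file):
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);
 *	put_online_cpus();
 *
 * Between the two calls no CPU can be hot-unplugged, because the refcount
 * taken above keeps cpu_hotplug_begin() waiting.
 */
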
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
				break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

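/*
 * Writer-side pairing, as used by cpu_down()/_cpu_down() and
 * cpu_up()/_cpu_up() later in this file (sketch of the nesting only):
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	... perform the actual hotplug operation ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */
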
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

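/*
 * Registration sketch (my_cpu_callback() and my_cpu_notifier are purely
 * hypothetical names; see smpboot_thread_call()/smpboot_thread_init()
 * below for a real in-file user of this API):
 *
 *	static int my_cpu_callback(struct notifier_block *nfb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			... react to cpu coming online ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_notifier);
 */
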
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, so wait for all
	 * preempt-disabled and RCU users of this state to go away such that
	 * all new such users will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	tick_cleanup_dead_cpu(cpu);
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void __cpuinit smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS single-bit mask values of the form 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
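
/*
 * How cpumask_of() uses the table above: a rough sketch of get_cpu_mask()
 * from <linux/cpumask.h> of this era (see that header for the
 * authoritative version):
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * Row 1 + (cpu % BITS_PER_LONG) has exactly that bit set in its first
 * word; backing the pointer up by cpu / BITS_PER_LONG words moves that
 * word to the right offset, while the preceding all-zero words are
 * borrowed from the row before - which is why cpu_bit_bitmap[0] must
 * exist and stay empty.
 */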

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}