/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
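/*
 * Example (an illustrative sketch, not part of this file): a subsystem
 * that must not miss CPUs coming online while it registers its hotplug
 * callback would follow the protocol described above.  foo_init_cpu()
 * and foo_cpu_nb are hypothetical names:
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		foo_init_cpu(cpu);
 *	__register_cpu_notifier(&foo_cpu_nb);
 *	cpu_notifier_register_done();
 *
 * __register_cpu_notifier() is used (rather than register_cpu_notifier())
 * because the begin/done pair above already holds cpu_add_remove_lock.
 */
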
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
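
/*
 * Example (sketch only): a typical reader-side critical section brackets
 * work that must not race with a CPU going away; foo_do_cpu() is a
 * hypothetical per-cpu operation:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		foo_do_cpu(cpu);
 *	put_online_cpus();
 *
 * Readers may nest and may sleep; only the hotplug writer is excluded.
 */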

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
129 * - The writer acquires the cpu_hotplug.lock finds the refcount
130 * non zero and goes to sleep again.
131 *
132 * However, this is very difficult to achieve in practice since
Gautham R Shenoy86ef5c92008-01-25 21:08:02 +0100133 * get_online_cpus() not an api which is called all that often.
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100134 *
135 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
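
/*
 * The writer-side calling sequence, as used by cpu_down()/_cpu_down()
 * and cpu_up()/_cpu_up() below:
 *
 *	cpu_maps_update_begin();   - serialize against other writers
 *	cpu_hotplug_begin();       - wait for the reader refcount to drain
 *	...                        - the actual hotplug operation
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */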

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif /* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
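
/*
 * Example (a minimal sketch with hypothetical names): hotplug callbacks
 * conventionally mask out CPU_TASKS_FROZEN so that the frozen
 * (suspend-time) variant of each event is handled like the regular one:
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			if (foo_prepare_cpu(cpu))
 *				return NOTIFY_BAD;	- aborts the bring-up
 *			break;
 *		case CPU_DEAD:
 *			foo_cleanup_cpu(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_nb = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 */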

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
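
/*
 * Sketch of the intended call site (illustrative only; the exact sequence
 * is arch-specific): an architecture's __cpu_disable() first takes the
 * dying CPU out of the online mask and only then prunes the mm cpumasks:
 *
 *	int __cpu_disable(void)
 *	{
 *		...
 *		set_cpu_online(cpu, false);
 *		...
 *		clear_tasks_mm_cpumask(cpu);
 *		...
 *	}
 */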

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			pr_warn("Task %s (pid = %d) is on cpu %d (state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
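
/*
 * Example (sketch only): in-kernel callers drive hotplug through the two
 * exported entry points, checking the error code; cpu_down() exists only
 * under CONFIG_HOTPLUG_CPU:
 *
 *	err = cpu_down(3);
 *	if (err)
 *		pr_err("failed to offline CPU3: %d\n", err);
 *	...
 *	err = cpu_up(3);
 *
 * Both return -EBUSY while cpu_hotplug_disabled is set (e.g. during
 * suspend).  Userspace reaches the same paths by writing 0 or 1 to
 * /sys/devices/system/cpu/cpuN/online.
 */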

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has a higher priority than x86's
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * having disabled cpu hotplug, in order to avoid a cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
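
/*
 * Sketch of the expected arch-side ordering (illustrative; the entry
 * point name varies per architecture): the freshly booted CPU calls this
 * from its secondary startup path, before enabling interrupts and before
 * marking itself online:
 *
 *	void secondary_start_kernel(void)
 *	{
 *		...
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		...
 *	}
 */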

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents the binary value 1<<nr for every one of the NR_CPUS
 * bit numbers nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
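
/*
 * A sketch of how cpumask_of() consumes this table, based on its
 * definition in <linux/cpumask.h> for this kernel generation: row
 * 1 + cpu % BITS_PER_LONG holds the word (1UL << (cpu % BITS_PER_LONG))
 * at index 0, and backing the pointer up by cpu / BITS_PER_LONG words
 * makes that word land at the correct offset for the wanted CPU:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * The "back into it" trick works because every word other than word 0 of
 * each row is zero, so all bits except 'cpu' read as clear.
 */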

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}