/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
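
/*
 * Illustrative sketch (not part of the original file): a typical read-side
 * user brackets its walk of the online CPUs with get/put_online_cpus() so
 * that no CPU can come or go underneath it:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_setup(cpu);		(hypothetical helper)
 *	put_online_cpus();
 *
 * The section may sleep, but must not itself start a hotplug operation,
 * since cpu_hotplug_begin() would then wait forever for the refcount this
 * task is holding.
 */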

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
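
/*
 * Illustrative sketch (not part of the original file): the write side, as
 * used by _cpu_up()/_cpu_down() below, first serializes against other
 * writers and then shuts out new readers for the duration of the operation:
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_begin();
 *	... bring a CPU up or down ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */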

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
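
/*
 * Illustrative sketch (hypothetical callback, not part of the original
 * file): a subsystem that needs to track CPUs coming and going registers
 * a notifier block and switches on the action with the _FROZEN bit masked
 * off:
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			... allocate per-cpu state for cpu ...
 *			break;
 *		case CPU_DEAD:
 *			... free per-cpu state for cpu ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&foo_cpu_notifier);
 */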

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
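
/*
 * Note (not in the original file): the common callers of cpu_up() and
 * cpu_down() are the per-CPU sysfs "online" attribute
 * (/sys/devices/system/cpu/cpuN/online, handled in drivers/base/) and the
 * suspend/resume path via disable_nonboot_cpus()/enable_nonboot_cpus()
 * below.
 */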

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
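
/*
 * Note (not in the original file): disable_nonboot_cpus() and
 * enable_nonboot_cpus() are called from the suspend/hibernate core
 * (kernel/power/) around the window in which only the boot CPU may run,
 * with tasks already frozen -- hence the tasks_frozen = 1 argument passed
 * to _cpu_down()/_cpu_up() above.
 */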

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for every value of nr below NR_CPUS, the NR_CPUS-bit
 * bitmap with only bit nr set (i.e. the binary value 1<<nr).
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
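
/*
 * Illustrative note (not in the original file): get_cpu_mask(cpu), which
 * backs cpumask_of(), takes &cpu_bit_bitmap[1 + cpu % BITS_PER_LONG][0]
 * and steps it back by cpu / BITS_PER_LONG longs.  The word that then sits
 * at index cpu / BITS_PER_LONG of the returned mask is the one with bit
 * (cpu % BITS_PER_LONG) set, and every other word read from the table is
 * zero, so the result reads as a cpumask with exactly bit 'cpu' set.
 */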

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
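
/*
 * Note (not in the original file): the underlying bitmaps above are static,
 * so these accessors are how the rest of the kernel updates the global cpu
 * masks.  Typically, arch setup code fills in the possible/present masks
 * while enumerating CPUs at boot (init_cpu_possible(), set_cpu_present()),
 * and the bringup/teardown paths flip individual bits with set_cpu_online()
 * and set_cpu_active().
 */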